1 | /* |
2 | * QEMU emulation of an Intel IOMMU (VT-d) |
3 | * (DMA Remapping device) |
4 | * |
5 | * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com> |
6 | * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com> |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by |
10 | * the Free Software Foundation; either version 2 of the License, or |
11 | * (at your option) any later version. |
12 | |
13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. |
17 | |
18 | * You should have received a copy of the GNU General Public License along |
19 | * with this program; if not, see <http://www.gnu.org/licenses/>. |
20 | */ |
21 | |
22 | #include "qemu/osdep.h" |
23 | #include "qemu/error-report.h" |
24 | #include "qemu/main-loop.h" |
25 | #include "qapi/error.h" |
26 | #include "hw/sysbus.h" |
27 | #include "exec/address-spaces.h" |
28 | #include "intel_iommu_internal.h" |
29 | #include "hw/pci/pci.h" |
30 | #include "hw/pci/pci_bus.h" |
31 | #include "hw/qdev-properties.h" |
32 | #include "hw/i386/pc.h" |
33 | #include "hw/i386/apic-msidef.h" |
34 | #include "hw/boards.h" |
35 | #include "hw/i386/x86-iommu.h" |
36 | #include "hw/pci-host/q35.h" |
37 | #include "sysemu/kvm.h" |
38 | #include "hw/i386/apic_internal.h" |
39 | #include "kvm_i386.h" |
40 | #include "migration/vmstate.h" |
41 | #include "trace.h" |
42 | |
43 | /* context entry operations */ |
44 | #define VTD_CE_GET_RID2PASID(ce) \ |
45 | ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK) |
46 | #define VTD_CE_GET_PASID_DIR_TABLE(ce) \ |
47 | ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK) |
48 | |
49 | /* pe operations */ |
50 | #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT) |
51 | #define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW)) |
52 | #define VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write) {\ |
53 | if (ret_fr) { \ |
54 | ret_fr = -ret_fr; \ |
55 | if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { \ |
56 | trace_vtd_fault_disabled(); \ |
57 | } else { \ |
58 | vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); \ |
59 | } \ |
60 | goto error; \ |
61 | } \ |
62 | } |
63 | |
64 | static void vtd_address_space_refresh_all(IntelIOMMUState *s); |
65 | static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n); |
66 | |
67 | static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val, |
68 | uint64_t wmask, uint64_t w1cmask) |
69 | { |
70 | stq_le_p(&s->csr[addr], val); |
71 | stq_le_p(&s->wmask[addr], wmask); |
72 | stq_le_p(&s->w1cmask[addr], w1cmask); |
73 | } |
74 | |
75 | static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask) |
76 | { |
77 | stq_le_p(&s->womask[addr], mask); |
78 | } |
79 | |
80 | static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val, |
81 | uint32_t wmask, uint32_t w1cmask) |
82 | { |
83 | stl_le_p(&s->csr[addr], val); |
84 | stl_le_p(&s->wmask[addr], wmask); |
85 | stl_le_p(&s->w1cmask[addr], w1cmask); |
86 | } |
87 | |
88 | static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask) |
89 | { |
90 | stl_le_p(&s->womask[addr], mask); |
91 | } |
92 | |
93 | /* "External" get/set operations */ |
94 | static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val) |
95 | { |
96 | uint64_t oldval = ldq_le_p(&s->csr[addr]); |
97 | uint64_t wmask = ldq_le_p(&s->wmask[addr]); |
98 | uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]); |
99 | stq_le_p(&s->csr[addr], |
100 | ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val)); |
101 | } |
102 | |
103 | static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val) |
104 | { |
105 | uint32_t oldval = ldl_le_p(&s->csr[addr]); |
106 | uint32_t wmask = ldl_le_p(&s->wmask[addr]); |
107 | uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]); |
108 | stl_le_p(&s->csr[addr], |
109 | ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val)); |
110 | } |
111 | |
112 | static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr) |
113 | { |
114 | uint64_t val = ldq_le_p(&s->csr[addr]); |
115 | uint64_t womask = ldq_le_p(&s->womask[addr]); |
116 | return val & ~womask; |
117 | } |
118 | |
119 | static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr) |
120 | { |
121 | uint32_t val = ldl_le_p(&s->csr[addr]); |
122 | uint32_t womask = ldl_le_p(&s->womask[addr]); |
123 | return val & ~womask; |
124 | } |
125 | |
126 | /* "Internal" get/set operations */ |
127 | static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr) |
128 | { |
129 | return ldq_le_p(&s->csr[addr]); |
130 | } |
131 | |
132 | static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr) |
133 | { |
134 | return ldl_le_p(&s->csr[addr]); |
135 | } |
136 | |
137 | static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val) |
138 | { |
139 | stq_le_p(&s->csr[addr], val); |
140 | } |
141 | |
142 | static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr, |
143 | uint32_t clear, uint32_t mask) |
144 | { |
145 | uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask; |
146 | stl_le_p(&s->csr[addr], new_val); |
147 | return new_val; |
148 | } |
149 | |
150 | static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr, |
151 | uint64_t clear, uint64_t mask) |
152 | { |
153 | uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask; |
154 | stq_le_p(&s->csr[addr], new_val); |
155 | return new_val; |
156 | } |
157 | |
158 | static inline void vtd_iommu_lock(IntelIOMMUState *s) |
159 | { |
160 | qemu_mutex_lock(&s->iommu_lock); |
161 | } |
162 | |
163 | static inline void vtd_iommu_unlock(IntelIOMMUState *s) |
164 | { |
165 | qemu_mutex_unlock(&s->iommu_lock); |
166 | } |
167 | |
168 | static void vtd_update_scalable_state(IntelIOMMUState *s) |
169 | { |
170 | uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG); |
171 | |
172 | if (s->scalable_mode) { |
173 | s->root_scalable = val & VTD_RTADDR_SMT; |
174 | } |
175 | } |
176 | |
177 | /* Whether the address space needs to notify new mappings */ |
178 | static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as) |
179 | { |
180 | return as->notifier_flags & IOMMU_NOTIFIER_MAP; |
181 | } |
182 | |
183 | /* GHashTable functions */ |
184 | static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2) |
185 | { |
186 | return *((const uint64_t *)v1) == *((const uint64_t *)v2); |
187 | } |
188 | |
189 | static guint vtd_uint64_hash(gconstpointer v) |
190 | { |
191 | return (guint)*(const uint64_t *)v; |
192 | } |
193 | |
194 | static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value, |
195 | gpointer user_data) |
196 | { |
197 | VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value; |
198 | uint16_t domain_id = *(uint16_t *)user_data; |
199 | return entry->domain_id == domain_id; |
200 | } |
201 | |
202 | /* The shift of an addr for a certain level of paging structure */ |
203 | static inline uint32_t vtd_slpt_level_shift(uint32_t level) |
204 | { |
205 | assert(level != 0); |
206 | return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS; |
207 | } |
208 | |
209 | static inline uint64_t vtd_slpt_level_page_mask(uint32_t level) |
210 | { |
211 | return ~((1ULL << vtd_slpt_level_shift(level)) - 1); |
212 | } |
213 | |
214 | static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value, |
215 | gpointer user_data) |
216 | { |
217 | VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value; |
218 | VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data; |
219 | uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; |
220 | uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; |
221 | return (entry->domain_id == info->domain_id) && |
222 | (((entry->gfn & info->mask) == gfn) || |
223 | (entry->gfn == gfn_tlb)); |
224 | } |
225 | |
226 | /* Reset all the gen of VTDAddressSpace to zero and set the gen of |
227 | * IntelIOMMUState to 1. Must be called with IOMMU lock held. |
228 | */ |
229 | static void vtd_reset_context_cache_locked(IntelIOMMUState *s) |
230 | { |
231 | VTDAddressSpace *vtd_as; |
232 | VTDBus *vtd_bus; |
233 | GHashTableIter bus_it; |
234 | uint32_t devfn_it; |
235 | |
236 | trace_vtd_context_cache_reset(); |
237 | |
238 | g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr); |
239 | |
240 | while (g_hash_table_iter_next (&bus_it, NULL, (void**)&vtd_bus)) { |
241 | for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { |
242 | vtd_as = vtd_bus->dev_as[devfn_it]; |
243 | if (!vtd_as) { |
244 | continue; |
245 | } |
246 | vtd_as->context_cache_entry.context_cache_gen = 0; |
247 | } |
248 | } |
249 | s->context_cache_gen = 1; |
250 | } |
251 | |
252 | /* Must be called with IOMMU lock held. */ |
253 | static void vtd_reset_iotlb_locked(IntelIOMMUState *s) |
254 | { |
255 | assert(s->iotlb); |
256 | g_hash_table_remove_all(s->iotlb); |
257 | } |
258 | |
259 | static void vtd_reset_iotlb(IntelIOMMUState *s) |
260 | { |
261 | vtd_iommu_lock(s); |
262 | vtd_reset_iotlb_locked(s); |
263 | vtd_iommu_unlock(s); |
264 | } |
265 | |
266 | static void vtd_reset_caches(IntelIOMMUState *s) |
267 | { |
268 | vtd_iommu_lock(s); |
269 | vtd_reset_iotlb_locked(s); |
270 | vtd_reset_context_cache_locked(s); |
271 | vtd_iommu_unlock(s); |
272 | } |
273 | |
274 | static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id, |
275 | uint32_t level) |
276 | { |
277 | return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) | |
278 | ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT); |
279 | } |
280 | |
281 | static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level) |
282 | { |
283 | return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K; |
284 | } |
285 | |
286 | /* Must be called with IOMMU lock held */ |
287 | static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id, |
288 | hwaddr addr) |
289 | { |
290 | VTDIOTLBEntry *entry; |
291 | uint64_t key; |
292 | int level; |
293 | |
294 | for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) { |
295 | key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level), |
296 | source_id, level); |
297 | entry = g_hash_table_lookup(s->iotlb, &key); |
298 | if (entry) { |
299 | goto out; |
300 | } |
301 | } |
302 | |
303 | out: |
304 | return entry; |
305 | } |
306 | |
307 | /* Must be with IOMMU lock held */ |
308 | static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id, |
309 | uint16_t domain_id, hwaddr addr, uint64_t slpte, |
310 | uint8_t access_flags, uint32_t level) |
311 | { |
312 | VTDIOTLBEntry *entry = g_malloc(sizeof(*entry)); |
313 | uint64_t *key = g_malloc(sizeof(*key)); |
314 | uint64_t gfn = vtd_get_iotlb_gfn(addr, level); |
315 | |
316 | trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id); |
317 | if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) { |
318 | trace_vtd_iotlb_reset("iotlb exceeds size limit" ); |
319 | vtd_reset_iotlb_locked(s); |
320 | } |
321 | |
322 | entry->gfn = gfn; |
323 | entry->domain_id = domain_id; |
324 | entry->slpte = slpte; |
325 | entry->access_flags = access_flags; |
326 | entry->mask = vtd_slpt_level_page_mask(level); |
327 | *key = vtd_get_iotlb_key(gfn, source_id, level); |
328 | g_hash_table_replace(s->iotlb, key, entry); |
329 | } |
330 | |
331 | /* Given the reg addr of both the message data and address, generate an |
332 | * interrupt via MSI. |
333 | */ |
334 | static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg, |
335 | hwaddr mesg_data_reg) |
336 | { |
337 | MSIMessage msi; |
338 | |
339 | assert(mesg_data_reg < DMAR_REG_SIZE); |
340 | assert(mesg_addr_reg < DMAR_REG_SIZE); |
341 | |
342 | msi.address = vtd_get_long_raw(s, mesg_addr_reg); |
343 | msi.data = vtd_get_long_raw(s, mesg_data_reg); |
344 | |
345 | trace_vtd_irq_generate(msi.address, msi.data); |
346 | |
347 | apic_get_class()->send_msi(&msi); |
348 | } |
349 | |
350 | /* Generate a fault event to software via MSI if conditions are met. |
351 | * Notice that the value of FSTS_REG being passed to it should be the one |
352 | * before any update. |
353 | */ |
354 | static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts) |
355 | { |
356 | if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO || |
357 | pre_fsts & VTD_FSTS_IQE) { |
358 | error_report_once("There are previous interrupt conditions " |
359 | "to be serviced by software, fault event " |
360 | "is not generated" ); |
361 | return; |
362 | } |
363 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP); |
364 | if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) { |
365 | error_report_once("Interrupt Mask set, irq is not generated" ); |
366 | } else { |
367 | vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG); |
368 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); |
369 | } |
370 | } |
371 | |
372 | /* Check if the Fault (F) field of the Fault Recording Register referenced by |
373 | * @index is Set. |
374 | */ |
375 | static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index) |
376 | { |
377 | /* Each reg is 128-bit */ |
378 | hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); |
379 | addr += 8; /* Access the high 64-bit half */ |
380 | |
381 | assert(index < DMAR_FRCD_REG_NR); |
382 | |
383 | return vtd_get_quad_raw(s, addr) & VTD_FRCD_F; |
384 | } |
385 | |
386 | /* Update the PPF field of Fault Status Register. |
387 | * Should be called whenever change the F field of any fault recording |
388 | * registers. |
389 | */ |
390 | static void vtd_update_fsts_ppf(IntelIOMMUState *s) |
391 | { |
392 | uint32_t i; |
393 | uint32_t ppf_mask = 0; |
394 | |
395 | for (i = 0; i < DMAR_FRCD_REG_NR; i++) { |
396 | if (vtd_is_frcd_set(s, i)) { |
397 | ppf_mask = VTD_FSTS_PPF; |
398 | break; |
399 | } |
400 | } |
401 | vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask); |
402 | trace_vtd_fsts_ppf(!!ppf_mask); |
403 | } |
404 | |
405 | static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index) |
406 | { |
407 | /* Each reg is 128-bit */ |
408 | hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); |
409 | addr += 8; /* Access the high 64-bit half */ |
410 | |
411 | assert(index < DMAR_FRCD_REG_NR); |
412 | |
413 | vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F); |
414 | vtd_update_fsts_ppf(s); |
415 | } |
416 | |
417 | /* Must not update F field now, should be done later */ |
418 | static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index, |
419 | uint16_t source_id, hwaddr addr, |
420 | VTDFaultReason fault, bool is_write) |
421 | { |
422 | uint64_t hi = 0, lo; |
423 | hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); |
424 | |
425 | assert(index < DMAR_FRCD_REG_NR); |
426 | |
427 | lo = VTD_FRCD_FI(addr); |
428 | hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault); |
429 | if (!is_write) { |
430 | hi |= VTD_FRCD_T; |
431 | } |
432 | vtd_set_quad_raw(s, frcd_reg_addr, lo); |
433 | vtd_set_quad_raw(s, frcd_reg_addr + 8, hi); |
434 | |
435 | trace_vtd_frr_new(index, hi, lo); |
436 | } |
437 | |
438 | /* Try to collapse multiple pending faults from the same requester */ |
439 | static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id) |
440 | { |
441 | uint32_t i; |
442 | uint64_t frcd_reg; |
443 | hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */ |
444 | |
445 | for (i = 0; i < DMAR_FRCD_REG_NR; i++) { |
446 | frcd_reg = vtd_get_quad_raw(s, addr); |
447 | if ((frcd_reg & VTD_FRCD_F) && |
448 | ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) { |
449 | return true; |
450 | } |
451 | addr += 16; /* 128-bit for each */ |
452 | } |
453 | return false; |
454 | } |
455 | |
456 | /* Log and report an DMAR (address translation) fault to software */ |
457 | static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id, |
458 | hwaddr addr, VTDFaultReason fault, |
459 | bool is_write) |
460 | { |
461 | uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); |
462 | |
463 | assert(fault < VTD_FR_MAX); |
464 | |
465 | if (fault == VTD_FR_RESERVED_ERR) { |
466 | /* This is not a normal fault reason case. Drop it. */ |
467 | return; |
468 | } |
469 | |
470 | trace_vtd_dmar_fault(source_id, fault, addr, is_write); |
471 | |
472 | if (fsts_reg & VTD_FSTS_PFO) { |
473 | error_report_once("New fault is not recorded due to " |
474 | "Primary Fault Overflow" ); |
475 | return; |
476 | } |
477 | |
478 | if (vtd_try_collapse_fault(s, source_id)) { |
479 | error_report_once("New fault is not recorded due to " |
480 | "compression of faults" ); |
481 | return; |
482 | } |
483 | |
484 | if (vtd_is_frcd_set(s, s->next_frcd_reg)) { |
485 | error_report_once("Next Fault Recording Reg is used, " |
486 | "new fault is not recorded, set PFO field" ); |
487 | vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO); |
488 | return; |
489 | } |
490 | |
491 | vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write); |
492 | |
493 | if (fsts_reg & VTD_FSTS_PPF) { |
494 | error_report_once("There are pending faults already, " |
495 | "fault event is not generated" ); |
496 | vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); |
497 | s->next_frcd_reg++; |
498 | if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { |
499 | s->next_frcd_reg = 0; |
500 | } |
501 | } else { |
502 | vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK, |
503 | VTD_FSTS_FRI(s->next_frcd_reg)); |
504 | vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */ |
505 | s->next_frcd_reg++; |
506 | if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { |
507 | s->next_frcd_reg = 0; |
508 | } |
509 | /* This case actually cause the PPF to be Set. |
510 | * So generate fault event (interrupt). |
511 | */ |
512 | vtd_generate_fault_event(s, fsts_reg); |
513 | } |
514 | } |
515 | |
516 | /* Handle Invalidation Queue Errors of queued invalidation interface error |
517 | * conditions. |
518 | */ |
519 | static void vtd_handle_inv_queue_error(IntelIOMMUState *s) |
520 | { |
521 | uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); |
522 | |
523 | vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE); |
524 | vtd_generate_fault_event(s, fsts_reg); |
525 | } |
526 | |
527 | /* Set the IWC field and try to generate an invalidation completion interrupt */ |
528 | static void vtd_generate_completion_event(IntelIOMMUState *s) |
529 | { |
530 | if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) { |
531 | trace_vtd_inv_desc_wait_irq("One pending, skip current" ); |
532 | return; |
533 | } |
534 | vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC); |
535 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP); |
536 | if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) { |
537 | trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, " |
538 | "new event not generated" ); |
539 | return; |
540 | } else { |
541 | /* Generate the interrupt event */ |
542 | trace_vtd_inv_desc_wait_irq("Generating complete event" ); |
543 | vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG); |
544 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); |
545 | } |
546 | } |
547 | |
548 | static inline bool vtd_root_entry_present(IntelIOMMUState *s, |
549 | VTDRootEntry *re, |
550 | uint8_t devfn) |
551 | { |
552 | if (s->root_scalable && devfn > UINT8_MAX / 2) { |
553 | return re->hi & VTD_ROOT_ENTRY_P; |
554 | } |
555 | |
556 | return re->lo & VTD_ROOT_ENTRY_P; |
557 | } |
558 | |
559 | static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, |
560 | VTDRootEntry *re) |
561 | { |
562 | dma_addr_t addr; |
563 | |
564 | addr = s->root + index * sizeof(*re); |
565 | if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) { |
566 | re->lo = 0; |
567 | return -VTD_FR_ROOT_TABLE_INV; |
568 | } |
569 | re->lo = le64_to_cpu(re->lo); |
570 | re->hi = le64_to_cpu(re->hi); |
571 | return 0; |
572 | } |
573 | |
574 | static inline bool vtd_ce_present(VTDContextEntry *context) |
575 | { |
576 | return context->lo & VTD_CONTEXT_ENTRY_P; |
577 | } |
578 | |
579 | static int vtd_get_context_entry_from_root(IntelIOMMUState *s, |
580 | VTDRootEntry *re, |
581 | uint8_t index, |
582 | VTDContextEntry *ce) |
583 | { |
584 | dma_addr_t addr, ce_size; |
585 | |
586 | /* we have checked that root entry is present */ |
587 | ce_size = s->root_scalable ? VTD_CTX_ENTRY_SCALABLE_SIZE : |
588 | VTD_CTX_ENTRY_LEGACY_SIZE; |
589 | |
590 | if (s->root_scalable && index > UINT8_MAX / 2) { |
591 | index = index & (~VTD_DEVFN_CHECK_MASK); |
592 | addr = re->hi & VTD_ROOT_ENTRY_CTP; |
593 | } else { |
594 | addr = re->lo & VTD_ROOT_ENTRY_CTP; |
595 | } |
596 | |
597 | addr = addr + index * ce_size; |
598 | if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) { |
599 | return -VTD_FR_CONTEXT_TABLE_INV; |
600 | } |
601 | |
602 | ce->lo = le64_to_cpu(ce->lo); |
603 | ce->hi = le64_to_cpu(ce->hi); |
604 | if (ce_size == VTD_CTX_ENTRY_SCALABLE_SIZE) { |
605 | ce->val[2] = le64_to_cpu(ce->val[2]); |
606 | ce->val[3] = le64_to_cpu(ce->val[3]); |
607 | } |
608 | return 0; |
609 | } |
610 | |
611 | static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce) |
612 | { |
613 | return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR; |
614 | } |
615 | |
616 | static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw) |
617 | { |
618 | return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw); |
619 | } |
620 | |
621 | /* Whether the pte indicates the address of the page frame */ |
622 | static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level) |
623 | { |
624 | return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK); |
625 | } |
626 | |
627 | /* Get the content of a spte located in @base_addr[@index] */ |
628 | static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index) |
629 | { |
630 | uint64_t slpte; |
631 | |
632 | assert(index < VTD_SL_PT_ENTRY_NR); |
633 | |
634 | if (dma_memory_read(&address_space_memory, |
635 | base_addr + index * sizeof(slpte), &slpte, |
636 | sizeof(slpte))) { |
637 | slpte = (uint64_t)-1; |
638 | return slpte; |
639 | } |
640 | slpte = le64_to_cpu(slpte); |
641 | return slpte; |
642 | } |
643 | |
644 | /* Given an iova and the level of paging structure, return the offset |
645 | * of current level. |
646 | */ |
647 | static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level) |
648 | { |
649 | return (iova >> vtd_slpt_level_shift(level)) & |
650 | ((1ULL << VTD_SL_LEVEL_BITS) - 1); |
651 | } |
652 | |
653 | /* Check Capability Register to see if the @level of page-table is supported */ |
654 | static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level) |
655 | { |
656 | return VTD_CAP_SAGAW_MASK & s->cap & |
657 | (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT)); |
658 | } |
659 | |
660 | /* Return true if check passed, otherwise false */ |
661 | static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu, |
662 | VTDPASIDEntry *pe) |
663 | { |
664 | switch (VTD_PE_GET_TYPE(pe)) { |
665 | case VTD_SM_PASID_ENTRY_FLT: |
666 | case VTD_SM_PASID_ENTRY_SLT: |
667 | case VTD_SM_PASID_ENTRY_NESTED: |
668 | break; |
669 | case VTD_SM_PASID_ENTRY_PT: |
670 | if (!x86_iommu->pt_supported) { |
671 | return false; |
672 | } |
673 | break; |
674 | default: |
675 | /* Unknwon type */ |
676 | return false; |
677 | } |
678 | return true; |
679 | } |
680 | |
681 | static int vtd_get_pasid_dire(dma_addr_t pasid_dir_base, |
682 | uint32_t pasid, |
683 | VTDPASIDDirEntry *pdire) |
684 | { |
685 | uint32_t index; |
686 | dma_addr_t addr, entry_size; |
687 | |
688 | index = VTD_PASID_DIR_INDEX(pasid); |
689 | entry_size = VTD_PASID_DIR_ENTRY_SIZE; |
690 | addr = pasid_dir_base + index * entry_size; |
691 | if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) { |
692 | return -VTD_FR_PASID_TABLE_INV; |
693 | } |
694 | |
695 | return 0; |
696 | } |
697 | |
698 | static int vtd_get_pasid_entry(IntelIOMMUState *s, |
699 | uint32_t pasid, |
700 | VTDPASIDDirEntry *pdire, |
701 | VTDPASIDEntry *pe) |
702 | { |
703 | uint32_t index; |
704 | dma_addr_t addr, entry_size; |
705 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
706 | |
707 | index = VTD_PASID_TABLE_INDEX(pasid); |
708 | entry_size = VTD_PASID_ENTRY_SIZE; |
709 | addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK; |
710 | addr = addr + index * entry_size; |
711 | if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) { |
712 | return -VTD_FR_PASID_TABLE_INV; |
713 | } |
714 | |
715 | /* Do translation type check */ |
716 | if (!vtd_pe_type_check(x86_iommu, pe)) { |
717 | return -VTD_FR_PASID_TABLE_INV; |
718 | } |
719 | |
720 | if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) { |
721 | return -VTD_FR_PASID_TABLE_INV; |
722 | } |
723 | |
724 | return 0; |
725 | } |
726 | |
727 | static int vtd_get_pasid_entry_from_pasid(IntelIOMMUState *s, |
728 | dma_addr_t pasid_dir_base, |
729 | uint32_t pasid, |
730 | VTDPASIDEntry *pe) |
731 | { |
732 | int ret; |
733 | VTDPASIDDirEntry pdire; |
734 | |
735 | ret = vtd_get_pasid_dire(pasid_dir_base, pasid, &pdire); |
736 | if (ret) { |
737 | return ret; |
738 | } |
739 | |
740 | ret = vtd_get_pasid_entry(s, pasid, &pdire, pe); |
741 | if (ret) { |
742 | return ret; |
743 | } |
744 | |
745 | return ret; |
746 | } |
747 | |
748 | static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s, |
749 | VTDContextEntry *ce, |
750 | VTDPASIDEntry *pe) |
751 | { |
752 | uint32_t pasid; |
753 | dma_addr_t pasid_dir_base; |
754 | int ret = 0; |
755 | |
756 | pasid = VTD_CE_GET_RID2PASID(ce); |
757 | pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); |
758 | ret = vtd_get_pasid_entry_from_pasid(s, pasid_dir_base, pasid, pe); |
759 | |
760 | return ret; |
761 | } |
762 | |
763 | static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s, |
764 | VTDContextEntry *ce, |
765 | bool *pe_fpd_set) |
766 | { |
767 | int ret; |
768 | uint32_t pasid; |
769 | dma_addr_t pasid_dir_base; |
770 | VTDPASIDDirEntry pdire; |
771 | VTDPASIDEntry pe; |
772 | |
773 | pasid = VTD_CE_GET_RID2PASID(ce); |
774 | pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); |
775 | |
776 | ret = vtd_get_pasid_dire(pasid_dir_base, pasid, &pdire); |
777 | if (ret) { |
778 | return ret; |
779 | } |
780 | |
781 | if (pdire.val & VTD_PASID_DIR_FPD) { |
782 | *pe_fpd_set = true; |
783 | return 0; |
784 | } |
785 | |
786 | ret = vtd_get_pasid_entry(s, pasid, &pdire, &pe); |
787 | if (ret) { |
788 | return ret; |
789 | } |
790 | |
791 | if (pe.val[0] & VTD_PASID_ENTRY_FPD) { |
792 | *pe_fpd_set = true; |
793 | } |
794 | |
795 | return 0; |
796 | } |
797 | |
798 | /* Get the page-table level that hardware should use for the second-level |
799 | * page-table walk from the Address Width field of context-entry. |
800 | */ |
801 | static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce) |
802 | { |
803 | return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW); |
804 | } |
805 | |
806 | static uint32_t vtd_get_iova_level(IntelIOMMUState *s, |
807 | VTDContextEntry *ce) |
808 | { |
809 | VTDPASIDEntry pe; |
810 | |
811 | if (s->root_scalable) { |
812 | vtd_ce_get_rid2pasid_entry(s, ce, &pe); |
813 | return VTD_PE_GET_LEVEL(&pe); |
814 | } |
815 | |
816 | return vtd_ce_get_level(ce); |
817 | } |
818 | |
819 | static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce) |
820 | { |
821 | return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9; |
822 | } |
823 | |
824 | static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s, |
825 | VTDContextEntry *ce) |
826 | { |
827 | VTDPASIDEntry pe; |
828 | |
829 | if (s->root_scalable) { |
830 | vtd_ce_get_rid2pasid_entry(s, ce, &pe); |
831 | return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9; |
832 | } |
833 | |
834 | return vtd_ce_get_agaw(ce); |
835 | } |
836 | |
837 | static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce) |
838 | { |
839 | return ce->lo & VTD_CONTEXT_ENTRY_TT; |
840 | } |
841 | |
842 | /* Only for Legacy Mode. Return true if check passed, otherwise false */ |
843 | static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, |
844 | VTDContextEntry *ce) |
845 | { |
846 | switch (vtd_ce_get_type(ce)) { |
847 | case VTD_CONTEXT_TT_MULTI_LEVEL: |
848 | /* Always supported */ |
849 | break; |
850 | case VTD_CONTEXT_TT_DEV_IOTLB: |
851 | if (!x86_iommu->dt_supported) { |
852 | error_report_once("%s: DT specified but not supported" , __func__); |
853 | return false; |
854 | } |
855 | break; |
856 | case VTD_CONTEXT_TT_PASS_THROUGH: |
857 | if (!x86_iommu->pt_supported) { |
858 | error_report_once("%s: PT specified but not supported" , __func__); |
859 | return false; |
860 | } |
861 | break; |
862 | default: |
863 | /* Unknown type */ |
864 | error_report_once("%s: unknown ce type: %" PRIu32, __func__, |
865 | vtd_ce_get_type(ce)); |
866 | return false; |
867 | } |
868 | return true; |
869 | } |
870 | |
871 | static inline uint64_t vtd_iova_limit(IntelIOMMUState *s, |
872 | VTDContextEntry *ce, uint8_t aw) |
873 | { |
874 | uint32_t ce_agaw = vtd_get_iova_agaw(s, ce); |
875 | return 1ULL << MIN(ce_agaw, aw); |
876 | } |
877 | |
878 | /* Return true if IOVA passes range check, otherwise false. */ |
879 | static inline bool vtd_iova_range_check(IntelIOMMUState *s, |
880 | uint64_t iova, VTDContextEntry *ce, |
881 | uint8_t aw) |
882 | { |
883 | /* |
884 | * Check if @iova is above 2^X-1, where X is the minimum of MGAW |
885 | * in CAP_REG and AW in context-entry. |
886 | */ |
887 | return !(iova & ~(vtd_iova_limit(s, ce, aw) - 1)); |
888 | } |
889 | |
890 | static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s, |
891 | VTDContextEntry *ce) |
892 | { |
893 | VTDPASIDEntry pe; |
894 | |
895 | if (s->root_scalable) { |
896 | vtd_ce_get_rid2pasid_entry(s, ce, &pe); |
897 | return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR; |
898 | } |
899 | |
900 | return vtd_ce_get_slpt_base(ce); |
901 | } |
902 | |
903 | /* |
904 | * Rsvd field masks for spte: |
905 | * Index [1] to [4] 4k pages |
906 | * Index [5] to [8] large pages |
907 | */ |
908 | static uint64_t vtd_paging_entry_rsvd_field[9]; |
909 | |
910 | static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level) |
911 | { |
912 | if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) { |
913 | /* Maybe large page */ |
914 | return slpte & vtd_paging_entry_rsvd_field[level + 4]; |
915 | } else { |
916 | return slpte & vtd_paging_entry_rsvd_field[level]; |
917 | } |
918 | } |
919 | |
920 | /* Find the VTD address space associated with a given bus number */ |
921 | static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num) |
922 | { |
923 | VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num]; |
924 | if (!vtd_bus) { |
925 | /* |
926 | * Iterate over the registered buses to find the one which |
927 | * currently hold this bus number, and update the bus_num |
928 | * lookup table: |
929 | */ |
930 | GHashTableIter iter; |
931 | |
932 | g_hash_table_iter_init(&iter, s->vtd_as_by_busptr); |
933 | while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) { |
934 | if (pci_bus_num(vtd_bus->bus) == bus_num) { |
935 | s->vtd_as_by_bus_num[bus_num] = vtd_bus; |
936 | return vtd_bus; |
937 | } |
938 | } |
939 | } |
940 | return vtd_bus; |
941 | } |
942 | |
943 | /* Given the @iova, get relevant @slptep. @slpte_level will be the last level |
944 | * of the translation, can be used for deciding the size of large page. |
945 | */ |
946 | static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, |
947 | uint64_t iova, bool is_write, |
948 | uint64_t *slptep, uint32_t *slpte_level, |
949 | bool *reads, bool *writes, uint8_t aw_bits) |
950 | { |
951 | dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); |
952 | uint32_t level = vtd_get_iova_level(s, ce); |
953 | uint32_t offset; |
954 | uint64_t slpte; |
955 | uint64_t access_right_check; |
956 | |
957 | if (!vtd_iova_range_check(s, iova, ce, aw_bits)) { |
958 | error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")" , |
959 | __func__, iova); |
960 | return -VTD_FR_ADDR_BEYOND_MGAW; |
961 | } |
962 | |
963 | /* FIXME: what is the Atomics request here? */ |
964 | access_right_check = is_write ? VTD_SL_W : VTD_SL_R; |
965 | |
966 | while (true) { |
967 | offset = vtd_iova_level_offset(iova, level); |
968 | slpte = vtd_get_slpte(addr, offset); |
969 | |
970 | if (slpte == (uint64_t)-1) { |
971 | error_report_once("%s: detected read error on DMAR slpte " |
972 | "(iova=0x%" PRIx64 ")" , __func__, iova); |
973 | if (level == vtd_get_iova_level(s, ce)) { |
974 | /* Invalid programming of context-entry */ |
975 | return -VTD_FR_CONTEXT_ENTRY_INV; |
976 | } else { |
977 | return -VTD_FR_PAGING_ENTRY_INV; |
978 | } |
979 | } |
980 | *reads = (*reads) && (slpte & VTD_SL_R); |
981 | *writes = (*writes) && (slpte & VTD_SL_W); |
982 | if (!(slpte & access_right_check)) { |
983 | error_report_once("%s: detected slpte permission error " |
984 | "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", " |
985 | "slpte=0x%" PRIx64 ", write=%d)" , __func__, |
986 | iova, level, slpte, is_write); |
987 | return is_write ? -VTD_FR_WRITE : -VTD_FR_READ; |
988 | } |
989 | if (vtd_slpte_nonzero_rsvd(slpte, level)) { |
990 | error_report_once("%s: detected splte reserve non-zero " |
991 | "iova=0x%" PRIx64 ", level=0x%" PRIx32 |
992 | "slpte=0x%" PRIx64 ")" , __func__, iova, |
993 | level, slpte); |
994 | return -VTD_FR_PAGING_ENTRY_RSVD; |
995 | } |
996 | |
997 | if (vtd_is_last_slpte(slpte, level)) { |
998 | *slptep = slpte; |
999 | *slpte_level = level; |
1000 | return 0; |
1001 | } |
1002 | addr = vtd_get_slpte_addr(slpte, aw_bits); |
1003 | level--; |
1004 | } |
1005 | } |
1006 | |
1007 | typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private); |
1008 | |
1009 | /** |
1010 | * Constant information used during page walking |
1011 | * |
1012 | * @hook_fn: hook func to be called when detected page |
1013 | * @private: private data to be passed into hook func |
1014 | * @notify_unmap: whether we should notify invalid entries |
1015 | * @as: VT-d address space of the device |
1016 | * @aw: maximum address width |
1017 | * @domain: domain ID of the page walk |
1018 | */ |
1019 | typedef struct { |
1020 | VTDAddressSpace *as; |
1021 | vtd_page_walk_hook hook_fn; |
1022 | void *private; |
1023 | bool notify_unmap; |
1024 | uint8_t aw; |
1025 | uint16_t domain_id; |
1026 | } vtd_page_walk_info; |
1027 | |
1028 | static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info) |
1029 | { |
1030 | VTDAddressSpace *as = info->as; |
1031 | vtd_page_walk_hook hook_fn = info->hook_fn; |
1032 | void *private = info->private; |
1033 | DMAMap target = { |
1034 | .iova = entry->iova, |
1035 | .size = entry->addr_mask, |
1036 | .translated_addr = entry->translated_addr, |
1037 | .perm = entry->perm, |
1038 | }; |
1039 | DMAMap *mapped = iova_tree_find(as->iova_tree, &target); |
1040 | |
1041 | if (entry->perm == IOMMU_NONE && !info->notify_unmap) { |
1042 | trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); |
1043 | return 0; |
1044 | } |
1045 | |
1046 | assert(hook_fn); |
1047 | |
1048 | /* Update local IOVA mapped ranges */ |
1049 | if (entry->perm) { |
1050 | if (mapped) { |
1051 | /* If it's exactly the same translation, skip */ |
1052 | if (!memcmp(mapped, &target, sizeof(target))) { |
1053 | trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask, |
1054 | entry->translated_addr); |
1055 | return 0; |
1056 | } else { |
1057 | /* |
1058 | * Translation changed. Normally this should not |
1059 | * happen, but it can happen when with buggy guest |
1060 | * OSes. Note that there will be a small window that |
1061 | * we don't have map at all. But that's the best |
1062 | * effort we can do. The ideal way to emulate this is |
1063 | * atomically modify the PTE to follow what has |
1064 | * changed, but we can't. One example is that vfio |
1065 | * driver only has VFIO_IOMMU_[UN]MAP_DMA but no |
1066 | * interface to modify a mapping (meanwhile it seems |
1067 | * meaningless to even provide one). Anyway, let's |
1068 | * mark this as a TODO in case one day we'll have |
1069 | * a better solution. |
1070 | */ |
1071 | IOMMUAccessFlags cache_perm = entry->perm; |
1072 | int ret; |
1073 | |
1074 | /* Emulate an UNMAP */ |
1075 | entry->perm = IOMMU_NONE; |
1076 | trace_vtd_page_walk_one(info->domain_id, |
1077 | entry->iova, |
1078 | entry->translated_addr, |
1079 | entry->addr_mask, |
1080 | entry->perm); |
1081 | ret = hook_fn(entry, private); |
1082 | if (ret) { |
1083 | return ret; |
1084 | } |
1085 | /* Drop any existing mapping */ |
1086 | iova_tree_remove(as->iova_tree, &target); |
1087 | /* Recover the correct permission */ |
1088 | entry->perm = cache_perm; |
1089 | } |
1090 | } |
1091 | iova_tree_insert(as->iova_tree, &target); |
1092 | } else { |
1093 | if (!mapped) { |
1094 | /* Skip since we didn't map this range at all */ |
1095 | trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); |
1096 | return 0; |
1097 | } |
1098 | iova_tree_remove(as->iova_tree, &target); |
1099 | } |
1100 | |
1101 | trace_vtd_page_walk_one(info->domain_id, entry->iova, |
1102 | entry->translated_addr, entry->addr_mask, |
1103 | entry->perm); |
1104 | return hook_fn(entry, private); |
1105 | } |
1106 | |
1107 | /** |
1108 | * vtd_page_walk_level - walk over specific level for IOVA range |
1109 | * |
1110 | * @addr: base GPA addr to start the walk |
1111 | * @start: IOVA range start address |
1112 | * @end: IOVA range end address (start <= addr < end) |
1113 | * @read: whether parent level has read permission |
1114 | * @write: whether parent level has write permission |
1115 | * @info: constant information for the page walk |
1116 | */ |
1117 | static int vtd_page_walk_level(dma_addr_t addr, uint64_t start, |
1118 | uint64_t end, uint32_t level, bool read, |
1119 | bool write, vtd_page_walk_info *info) |
1120 | { |
1121 | bool read_cur, write_cur, entry_valid; |
1122 | uint32_t offset; |
1123 | uint64_t slpte; |
1124 | uint64_t subpage_size, subpage_mask; |
1125 | IOMMUTLBEntry entry; |
1126 | uint64_t iova = start; |
1127 | uint64_t iova_next; |
1128 | int ret = 0; |
1129 | |
1130 | trace_vtd_page_walk_level(addr, level, start, end); |
1131 | |
1132 | subpage_size = 1ULL << vtd_slpt_level_shift(level); |
1133 | subpage_mask = vtd_slpt_level_page_mask(level); |
1134 | |
1135 | while (iova < end) { |
1136 | iova_next = (iova & subpage_mask) + subpage_size; |
1137 | |
1138 | offset = vtd_iova_level_offset(iova, level); |
1139 | slpte = vtd_get_slpte(addr, offset); |
1140 | |
1141 | if (slpte == (uint64_t)-1) { |
1142 | trace_vtd_page_walk_skip_read(iova, iova_next); |
1143 | goto next; |
1144 | } |
1145 | |
1146 | if (vtd_slpte_nonzero_rsvd(slpte, level)) { |
1147 | trace_vtd_page_walk_skip_reserve(iova, iova_next); |
1148 | goto next; |
1149 | } |
1150 | |
1151 | /* Permissions are stacked with parents' */ |
1152 | read_cur = read && (slpte & VTD_SL_R); |
1153 | write_cur = write && (slpte & VTD_SL_W); |
1154 | |
1155 | /* |
1156 | * As long as we have either read/write permission, this is a |
1157 | * valid entry. The rule works for both page entries and page |
1158 | * table entries. |
1159 | */ |
1160 | entry_valid = read_cur | write_cur; |
1161 | |
1162 | if (!vtd_is_last_slpte(slpte, level) && entry_valid) { |
1163 | /* |
1164 | * This is a valid PDE (or even bigger than PDE). We need |
1165 | * to walk one further level. |
1166 | */ |
1167 | ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw), |
1168 | iova, MIN(iova_next, end), level - 1, |
1169 | read_cur, write_cur, info); |
1170 | } else { |
1171 | /* |
1172 | * This means we are either: |
1173 | * |
1174 | * (1) the real page entry (either 4K page, or huge page) |
1175 | * (2) the whole range is invalid |
1176 | * |
1177 | * In either case, we send an IOTLB notification down. |
1178 | */ |
1179 | entry.target_as = &address_space_memory; |
1180 | entry.iova = iova & subpage_mask; |
1181 | entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur); |
1182 | entry.addr_mask = ~subpage_mask; |
1183 | /* NOTE: this is only meaningful if entry_valid == true */ |
1184 | entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw); |
1185 | ret = vtd_page_walk_one(&entry, info); |
1186 | } |
1187 | |
1188 | if (ret < 0) { |
1189 | return ret; |
1190 | } |
1191 | |
1192 | next: |
1193 | iova = iova_next; |
1194 | } |
1195 | |
1196 | return 0; |
1197 | } |
1198 | |
1199 | /** |
1200 | * vtd_page_walk - walk specific IOVA range, and call the hook |
1201 | * |
1202 | * @s: intel iommu state |
1203 | * @ce: context entry to walk upon |
1204 | * @start: IOVA address to start the walk |
1205 | * @end: IOVA range end address (start <= addr < end) |
1206 | * @info: page walking information struct |
1207 | */ |
1208 | static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce, |
1209 | uint64_t start, uint64_t end, |
1210 | vtd_page_walk_info *info) |
1211 | { |
1212 | dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); |
1213 | uint32_t level = vtd_get_iova_level(s, ce); |
1214 | |
1215 | if (!vtd_iova_range_check(s, start, ce, info->aw)) { |
1216 | return -VTD_FR_ADDR_BEYOND_MGAW; |
1217 | } |
1218 | |
1219 | if (!vtd_iova_range_check(s, end, ce, info->aw)) { |
1220 | /* Fix end so that it reaches the maximum */ |
1221 | end = vtd_iova_limit(s, ce, info->aw); |
1222 | } |
1223 | |
1224 | return vtd_page_walk_level(addr, start, end, level, true, true, info); |
1225 | } |
1226 | |
1227 | static int vtd_root_entry_rsvd_bits_check(IntelIOMMUState *s, |
1228 | VTDRootEntry *re) |
1229 | { |
1230 | /* Legacy Mode reserved bits check */ |
1231 | if (!s->root_scalable && |
1232 | (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) |
1233 | goto rsvd_err; |
1234 | |
1235 | /* Scalable Mode reserved bits check */ |
1236 | if (s->root_scalable && |
1237 | ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) || |
1238 | (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) |
1239 | goto rsvd_err; |
1240 | |
1241 | return 0; |
1242 | |
1243 | rsvd_err: |
1244 | error_report_once("%s: invalid root entry: hi=0x%" PRIx64 |
1245 | ", lo=0x%" PRIx64, |
1246 | __func__, re->hi, re->lo); |
1247 | return -VTD_FR_ROOT_ENTRY_RSVD; |
1248 | } |
1249 | |
1250 | static inline int vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s, |
1251 | VTDContextEntry *ce) |
1252 | { |
1253 | if (!s->root_scalable && |
1254 | (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI || |
1255 | ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { |
1256 | error_report_once("%s: invalid context entry: hi=%" PRIx64 |
1257 | ", lo=%" PRIx64" (reserved nonzero)" , |
1258 | __func__, ce->hi, ce->lo); |
1259 | return -VTD_FR_CONTEXT_ENTRY_RSVD; |
1260 | } |
1261 | |
1262 | if (s->root_scalable && |
1263 | (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) || |
1264 | ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 || |
1265 | ce->val[2] || |
1266 | ce->val[3])) { |
1267 | error_report_once("%s: invalid context entry: val[3]=%" PRIx64 |
1268 | ", val[2]=%" PRIx64 |
1269 | ", val[1]=%" PRIx64 |
1270 | ", val[0]=%" PRIx64" (reserved nonzero)" , |
1271 | __func__, ce->val[3], ce->val[2], |
1272 | ce->val[1], ce->val[0]); |
1273 | return -VTD_FR_CONTEXT_ENTRY_RSVD; |
1274 | } |
1275 | |
1276 | return 0; |
1277 | } |
1278 | |
1279 | static int vtd_ce_rid2pasid_check(IntelIOMMUState *s, |
1280 | VTDContextEntry *ce) |
1281 | { |
1282 | VTDPASIDEntry pe; |
1283 | |
1284 | /* |
1285 | * Make sure in Scalable Mode, a present context entry |
1286 | * has valid rid2pasid setting, which includes valid |
1287 | * rid2pasid field and corresponding pasid entry setting |
1288 | */ |
1289 | return vtd_ce_get_rid2pasid_entry(s, ce, &pe); |
1290 | } |
1291 | |
1292 | /* Map a device to its corresponding domain (context-entry) */ |
1293 | static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, |
1294 | uint8_t devfn, VTDContextEntry *ce) |
1295 | { |
1296 | VTDRootEntry re; |
1297 | int ret_fr; |
1298 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
1299 | |
1300 | ret_fr = vtd_get_root_entry(s, bus_num, &re); |
1301 | if (ret_fr) { |
1302 | return ret_fr; |
1303 | } |
1304 | |
1305 | if (!vtd_root_entry_present(s, &re, devfn)) { |
1306 | /* Not error - it's okay we don't have root entry. */ |
1307 | trace_vtd_re_not_present(bus_num); |
1308 | return -VTD_FR_ROOT_ENTRY_P; |
1309 | } |
1310 | |
1311 | ret_fr = vtd_root_entry_rsvd_bits_check(s, &re); |
1312 | if (ret_fr) { |
1313 | return ret_fr; |
1314 | } |
1315 | |
1316 | ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce); |
1317 | if (ret_fr) { |
1318 | return ret_fr; |
1319 | } |
1320 | |
1321 | if (!vtd_ce_present(ce)) { |
1322 | /* Not error - it's okay we don't have context entry. */ |
1323 | trace_vtd_ce_not_present(bus_num, devfn); |
1324 | return -VTD_FR_CONTEXT_ENTRY_P; |
1325 | } |
1326 | |
1327 | ret_fr = vtd_context_entry_rsvd_bits_check(s, ce); |
1328 | if (ret_fr) { |
1329 | return ret_fr; |
1330 | } |
1331 | |
1332 | /* Check if the programming of context-entry is valid */ |
1333 | if (!s->root_scalable && |
1334 | !vtd_is_level_supported(s, vtd_ce_get_level(ce))) { |
1335 | error_report_once("%s: invalid context entry: hi=%" PRIx64 |
1336 | ", lo=%" PRIx64" (level %d not supported)" , |
1337 | __func__, ce->hi, ce->lo, |
1338 | vtd_ce_get_level(ce)); |
1339 | return -VTD_FR_CONTEXT_ENTRY_INV; |
1340 | } |
1341 | |
1342 | if (!s->root_scalable) { |
1343 | /* Do translation type check */ |
1344 | if (!vtd_ce_type_check(x86_iommu, ce)) { |
1345 | /* Errors dumped in vtd_ce_type_check() */ |
1346 | return -VTD_FR_CONTEXT_ENTRY_INV; |
1347 | } |
1348 | } else { |
1349 | /* |
1350 | * Check if the programming of context-entry.rid2pasid |
1351 | * and corresponding pasid setting is valid, and thus |
1352 | * avoids to check pasid entry fetching result in future |
1353 | * helper function calling. |
1354 | */ |
1355 | ret_fr = vtd_ce_rid2pasid_check(s, ce); |
1356 | if (ret_fr) { |
1357 | return ret_fr; |
1358 | } |
1359 | } |
1360 | |
1361 | return 0; |
1362 | } |
1363 | |
1364 | static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry, |
1365 | void *private) |
1366 | { |
1367 | memory_region_notify_iommu((IOMMUMemoryRegion *)private, 0, *entry); |
1368 | return 0; |
1369 | } |
1370 | |
1371 | static uint16_t vtd_get_domain_id(IntelIOMMUState *s, |
1372 | VTDContextEntry *ce) |
1373 | { |
1374 | VTDPASIDEntry pe; |
1375 | |
1376 | if (s->root_scalable) { |
1377 | vtd_ce_get_rid2pasid_entry(s, ce, &pe); |
1378 | return VTD_SM_PASID_ENTRY_DID(pe.val[1]); |
1379 | } |
1380 | |
1381 | return VTD_CONTEXT_ENTRY_DID(ce->hi); |
1382 | } |
1383 | |
1384 | static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, |
1385 | VTDContextEntry *ce, |
1386 | hwaddr addr, hwaddr size) |
1387 | { |
1388 | IntelIOMMUState *s = vtd_as->iommu_state; |
1389 | vtd_page_walk_info info = { |
1390 | .hook_fn = vtd_sync_shadow_page_hook, |
1391 | .private = (void *)&vtd_as->iommu, |
1392 | .notify_unmap = true, |
1393 | .aw = s->aw_bits, |
1394 | .as = vtd_as, |
1395 | .domain_id = vtd_get_domain_id(s, ce), |
1396 | }; |
1397 | |
1398 | return vtd_page_walk(s, ce, addr, addr + size, &info); |
1399 | } |
1400 | |
1401 | static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) |
1402 | { |
1403 | int ret; |
1404 | VTDContextEntry ce; |
1405 | IOMMUNotifier *n; |
1406 | |
1407 | ret = vtd_dev_to_context_entry(vtd_as->iommu_state, |
1408 | pci_bus_num(vtd_as->bus), |
1409 | vtd_as->devfn, &ce); |
1410 | if (ret) { |
1411 | if (ret == -VTD_FR_CONTEXT_ENTRY_P) { |
1412 | /* |
1413 | * It's a valid scenario to have a context entry that is |
1414 | * not present. For example, when a device is removed |
1415 | * from an existing domain then the context entry will be |
1416 | * zeroed by the guest before it was put into another |
1417 | * domain. When this happens, instead of synchronizing |
1418 | * the shadow pages we should invalidate all existing |
1419 | * mappings and notify the backends. |
1420 | */ |
1421 | IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { |
1422 | vtd_address_space_unmap(vtd_as, n); |
1423 | } |
1424 | ret = 0; |
1425 | } |
1426 | return ret; |
1427 | } |
1428 | |
1429 | return vtd_sync_shadow_page_table_range(vtd_as, &ce, 0, UINT64_MAX); |
1430 | } |
1431 | |
1432 | /* |
1433 | * Check if specific device is configed to bypass address |
1434 | * translation for DMA requests. In Scalable Mode, bypass |
1435 | * 1st-level translation or 2nd-level translation, it depends |
1436 | * on PGTT setting. |
1437 | */ |
1438 | static bool vtd_dev_pt_enabled(VTDAddressSpace *as) |
1439 | { |
1440 | IntelIOMMUState *s; |
1441 | VTDContextEntry ce; |
1442 | VTDPASIDEntry pe; |
1443 | int ret; |
1444 | |
1445 | assert(as); |
1446 | |
1447 | s = as->iommu_state; |
1448 | ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus), |
1449 | as->devfn, &ce); |
1450 | if (ret) { |
1451 | /* |
1452 | * Possibly failed to parse the context entry for some reason |
1453 | * (e.g., during init, or any guest configuration errors on |
1454 | * context entries). We should assume PT not enabled for |
1455 | * safety. |
1456 | */ |
1457 | return false; |
1458 | } |
1459 | |
1460 | if (s->root_scalable) { |
1461 | ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe); |
1462 | if (ret) { |
1463 | error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %" PRId32, |
1464 | __func__, ret); |
1465 | return false; |
1466 | } |
1467 | return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT); |
1468 | } |
1469 | |
1470 | return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH); |
1471 | } |
1472 | |
1473 | /* Return whether the device is using IOMMU translation. */ |
1474 | static bool vtd_switch_address_space(VTDAddressSpace *as) |
1475 | { |
1476 | bool use_iommu; |
1477 | /* Whether we need to take the BQL on our own */ |
1478 | bool take_bql = !qemu_mutex_iothread_locked(); |
1479 | |
1480 | assert(as); |
1481 | |
1482 | use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as); |
1483 | |
1484 | trace_vtd_switch_address_space(pci_bus_num(as->bus), |
1485 | VTD_PCI_SLOT(as->devfn), |
1486 | VTD_PCI_FUNC(as->devfn), |
1487 | use_iommu); |
1488 | |
1489 | /* |
1490 | * It's possible that we reach here without BQL, e.g., when called |
1491 | * from vtd_pt_enable_fast_path(). However the memory APIs need |
1492 | * it. We'd better make sure we have had it already, or, take it. |
1493 | */ |
1494 | if (take_bql) { |
1495 | qemu_mutex_lock_iothread(); |
1496 | } |
1497 | |
1498 | /* Turn off first then on the other */ |
1499 | if (use_iommu) { |
1500 | memory_region_set_enabled(&as->nodmar, false); |
1501 | memory_region_set_enabled(MEMORY_REGION(&as->iommu), true); |
1502 | } else { |
1503 | memory_region_set_enabled(MEMORY_REGION(&as->iommu), false); |
1504 | memory_region_set_enabled(&as->nodmar, true); |
1505 | } |
1506 | |
1507 | if (take_bql) { |
1508 | qemu_mutex_unlock_iothread(); |
1509 | } |
1510 | |
1511 | return use_iommu; |
1512 | } |
1513 | |
1514 | static void vtd_switch_address_space_all(IntelIOMMUState *s) |
1515 | { |
1516 | GHashTableIter iter; |
1517 | VTDBus *vtd_bus; |
1518 | int i; |
1519 | |
1520 | g_hash_table_iter_init(&iter, s->vtd_as_by_busptr); |
1521 | while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) { |
1522 | for (i = 0; i < PCI_DEVFN_MAX; i++) { |
1523 | if (!vtd_bus->dev_as[i]) { |
1524 | continue; |
1525 | } |
1526 | vtd_switch_address_space(vtd_bus->dev_as[i]); |
1527 | } |
1528 | } |
1529 | } |
1530 | |
1531 | static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn) |
1532 | { |
1533 | return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL); |
1534 | } |
1535 | |
1536 | static const bool vtd_qualified_faults[] = { |
1537 | [VTD_FR_RESERVED] = false, |
1538 | [VTD_FR_ROOT_ENTRY_P] = false, |
1539 | [VTD_FR_CONTEXT_ENTRY_P] = true, |
1540 | [VTD_FR_CONTEXT_ENTRY_INV] = true, |
1541 | [VTD_FR_ADDR_BEYOND_MGAW] = true, |
1542 | [VTD_FR_WRITE] = true, |
1543 | [VTD_FR_READ] = true, |
1544 | [VTD_FR_PAGING_ENTRY_INV] = true, |
1545 | [VTD_FR_ROOT_TABLE_INV] = false, |
1546 | [VTD_FR_CONTEXT_TABLE_INV] = false, |
1547 | [VTD_FR_ROOT_ENTRY_RSVD] = false, |
1548 | [VTD_FR_PAGING_ENTRY_RSVD] = true, |
1549 | [VTD_FR_CONTEXT_ENTRY_TT] = true, |
1550 | [VTD_FR_PASID_TABLE_INV] = false, |
1551 | [VTD_FR_RESERVED_ERR] = false, |
1552 | [VTD_FR_MAX] = false, |
1553 | }; |
1554 | |
1555 | /* To see if a fault condition is "qualified", which is reported to software |
1556 | * only if the FPD field in the context-entry used to process the faulting |
1557 | * request is 0. |
1558 | */ |
1559 | static inline bool vtd_is_qualified_fault(VTDFaultReason fault) |
1560 | { |
1561 | return vtd_qualified_faults[fault]; |
1562 | } |
1563 | |
1564 | static inline bool vtd_is_interrupt_addr(hwaddr addr) |
1565 | { |
1566 | return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST; |
1567 | } |
1568 | |
1569 | static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id) |
1570 | { |
1571 | VTDBus *vtd_bus; |
1572 | VTDAddressSpace *vtd_as; |
1573 | bool success = false; |
1574 | |
1575 | vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id)); |
1576 | if (!vtd_bus) { |
1577 | goto out; |
1578 | } |
1579 | |
1580 | vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)]; |
1581 | if (!vtd_as) { |
1582 | goto out; |
1583 | } |
1584 | |
1585 | if (vtd_switch_address_space(vtd_as) == false) { |
1586 | /* We switched off IOMMU region successfully. */ |
1587 | success = true; |
1588 | } |
1589 | |
1590 | out: |
1591 | trace_vtd_pt_enable_fast_path(source_id, success); |
1592 | } |
1593 | |
1594 | /* Map dev to context-entry then do a paging-structures walk to do a iommu |
1595 | * translation. |
1596 | * |
1597 | * Called from RCU critical section. |
1598 | * |
1599 | * @bus_num: The bus number |
1600 | * @devfn: The devfn, which is the combined of device and function number |
1601 | * @is_write: The access is a write operation |
1602 | * @entry: IOMMUTLBEntry that contain the addr to be translated and result |
1603 | * |
1604 | * Returns true if translation is successful, otherwise false. |
1605 | */ |
1606 | static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, |
1607 | uint8_t devfn, hwaddr addr, bool is_write, |
1608 | IOMMUTLBEntry *entry) |
1609 | { |
1610 | IntelIOMMUState *s = vtd_as->iommu_state; |
1611 | VTDContextEntry ce; |
1612 | uint8_t bus_num = pci_bus_num(bus); |
1613 | VTDContextCacheEntry *cc_entry; |
1614 | uint64_t slpte, page_mask; |
1615 | uint32_t level; |
1616 | uint16_t source_id = vtd_make_source_id(bus_num, devfn); |
1617 | int ret_fr; |
1618 | bool is_fpd_set = false; |
1619 | bool reads = true; |
1620 | bool writes = true; |
1621 | uint8_t access_flags; |
1622 | VTDIOTLBEntry *iotlb_entry; |
1623 | |
1624 | /* |
1625 | * We have standalone memory region for interrupt addresses, we |
1626 | * should never receive translation requests in this region. |
1627 | */ |
1628 | assert(!vtd_is_interrupt_addr(addr)); |
1629 | |
1630 | vtd_iommu_lock(s); |
1631 | |
1632 | cc_entry = &vtd_as->context_cache_entry; |
1633 | |
1634 | /* Try to fetch slpte form IOTLB */ |
1635 | iotlb_entry = vtd_lookup_iotlb(s, source_id, addr); |
1636 | if (iotlb_entry) { |
1637 | trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte, |
1638 | iotlb_entry->domain_id); |
1639 | slpte = iotlb_entry->slpte; |
1640 | access_flags = iotlb_entry->access_flags; |
1641 | page_mask = iotlb_entry->mask; |
1642 | goto out; |
1643 | } |
1644 | |
1645 | /* Try to fetch context-entry from cache first */ |
1646 | if (cc_entry->context_cache_gen == s->context_cache_gen) { |
1647 | trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi, |
1648 | cc_entry->context_entry.lo, |
1649 | cc_entry->context_cache_gen); |
1650 | ce = cc_entry->context_entry; |
1651 | is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; |
1652 | if (!is_fpd_set && s->root_scalable) { |
1653 | ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); |
1654 | VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); |
1655 | } |
1656 | } else { |
1657 | ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce); |
1658 | is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; |
1659 | if (!ret_fr && !is_fpd_set && s->root_scalable) { |
1660 | ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); |
1661 | } |
1662 | VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); |
1663 | /* Update context-cache */ |
1664 | trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo, |
1665 | cc_entry->context_cache_gen, |
1666 | s->context_cache_gen); |
1667 | cc_entry->context_entry = ce; |
1668 | cc_entry->context_cache_gen = s->context_cache_gen; |
1669 | } |
1670 | |
1671 | /* |
1672 | * We don't need to translate for pass-through context entries. |
1673 | * Also, let's ignore IOTLB caching as well for PT devices. |
1674 | */ |
1675 | if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) { |
1676 | entry->iova = addr & VTD_PAGE_MASK_4K; |
1677 | entry->translated_addr = entry->iova; |
1678 | entry->addr_mask = ~VTD_PAGE_MASK_4K; |
1679 | entry->perm = IOMMU_RW; |
1680 | trace_vtd_translate_pt(source_id, entry->iova); |
1681 | |
1682 | /* |
1683 | * When this happens, it means firstly caching-mode is not |
1684 | * enabled, and this is the first passthrough translation for |
1685 | * the device. Let's enable the fast path for passthrough. |
1686 | * |
1687 | * When passthrough is disabled again for the device, we can |
1688 | * capture it via the context entry invalidation, then the |
1689 | * IOMMU region can be swapped back. |
1690 | */ |
1691 | vtd_pt_enable_fast_path(s, source_id); |
1692 | vtd_iommu_unlock(s); |
1693 | return true; |
1694 | } |
1695 | |
1696 | ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level, |
1697 | &reads, &writes, s->aw_bits); |
1698 | VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); |
1699 | |
1700 | page_mask = vtd_slpt_level_page_mask(level); |
1701 | access_flags = IOMMU_ACCESS_FLAG(reads, writes); |
1702 | vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce), addr, slpte, |
1703 | access_flags, level); |
1704 | out: |
1705 | vtd_iommu_unlock(s); |
1706 | entry->iova = addr & page_mask; |
1707 | entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask; |
1708 | entry->addr_mask = ~page_mask; |
1709 | entry->perm = access_flags; |
1710 | return true; |
1711 | |
1712 | error: |
1713 | vtd_iommu_unlock(s); |
1714 | entry->iova = 0; |
1715 | entry->translated_addr = 0; |
1716 | entry->addr_mask = 0; |
1717 | entry->perm = IOMMU_NONE; |
1718 | return false; |
1719 | } |
1720 | |
1721 | static void vtd_root_table_setup(IntelIOMMUState *s) |
1722 | { |
1723 | s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG); |
1724 | s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits); |
1725 | |
1726 | vtd_update_scalable_state(s); |
1727 | |
1728 | trace_vtd_reg_dmar_root(s->root, s->root_scalable); |
1729 | } |
1730 | |
1731 | static void vtd_iec_notify_all(IntelIOMMUState *s, bool global, |
1732 | uint32_t index, uint32_t mask) |
1733 | { |
1734 | x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask); |
1735 | } |
1736 | |
1737 | static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s) |
1738 | { |
1739 | uint64_t value = 0; |
1740 | value = vtd_get_quad_raw(s, DMAR_IRTA_REG); |
1741 | s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1); |
1742 | s->intr_root = value & VTD_IRTA_ADDR_MASK(s->aw_bits); |
1743 | s->intr_eime = value & VTD_IRTA_EIME; |
1744 | |
1745 | /* Notify global invalidation */ |
1746 | vtd_iec_notify_all(s, true, 0, 0); |
1747 | |
1748 | trace_vtd_reg_ir_root(s->intr_root, s->intr_size); |
1749 | } |
1750 | |
1751 | static void vtd_iommu_replay_all(IntelIOMMUState *s) |
1752 | { |
1753 | VTDAddressSpace *vtd_as; |
1754 | |
1755 | QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { |
1756 | vtd_sync_shadow_page_table(vtd_as); |
1757 | } |
1758 | } |
1759 | |
1760 | static void vtd_context_global_invalidate(IntelIOMMUState *s) |
1761 | { |
1762 | trace_vtd_inv_desc_cc_global(); |
1763 | /* Protects context cache */ |
1764 | vtd_iommu_lock(s); |
1765 | s->context_cache_gen++; |
1766 | if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) { |
1767 | vtd_reset_context_cache_locked(s); |
1768 | } |
1769 | vtd_iommu_unlock(s); |
1770 | vtd_address_space_refresh_all(s); |
1771 | /* |
1772 | * From VT-d spec 6.5.2.1, a global context entry invalidation |
1773 | * should be followed by a IOTLB global invalidation, so we should |
1774 | * be safe even without this. Hoewever, let's replay the region as |
1775 | * well to be safer, and go back here when we need finer tunes for |
1776 | * VT-d emulation codes. |
1777 | */ |
1778 | vtd_iommu_replay_all(s); |
1779 | } |
1780 | |
1781 | /* Do a context-cache device-selective invalidation. |
1782 | * @func_mask: FM field after shifting |
1783 | */ |
1784 | static void vtd_context_device_invalidate(IntelIOMMUState *s, |
1785 | uint16_t source_id, |
1786 | uint16_t func_mask) |
1787 | { |
1788 | uint16_t mask; |
1789 | VTDBus *vtd_bus; |
1790 | VTDAddressSpace *vtd_as; |
1791 | uint8_t bus_n, devfn; |
1792 | uint16_t devfn_it; |
1793 | |
1794 | trace_vtd_inv_desc_cc_devices(source_id, func_mask); |
1795 | |
1796 | switch (func_mask & 3) { |
1797 | case 0: |
1798 | mask = 0; /* No bits in the SID field masked */ |
1799 | break; |
1800 | case 1: |
1801 | mask = 4; /* Mask bit 2 in the SID field */ |
1802 | break; |
1803 | case 2: |
1804 | mask = 6; /* Mask bit 2:1 in the SID field */ |
1805 | break; |
1806 | case 3: |
1807 | mask = 7; /* Mask bit 2:0 in the SID field */ |
1808 | break; |
1809 | } |
1810 | mask = ~mask; |
1811 | |
1812 | bus_n = VTD_SID_TO_BUS(source_id); |
1813 | vtd_bus = vtd_find_as_from_bus_num(s, bus_n); |
1814 | if (vtd_bus) { |
1815 | devfn = VTD_SID_TO_DEVFN(source_id); |
1816 | for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { |
1817 | vtd_as = vtd_bus->dev_as[devfn_it]; |
1818 | if (vtd_as && ((devfn_it & mask) == (devfn & mask))) { |
1819 | trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it), |
1820 | VTD_PCI_FUNC(devfn_it)); |
1821 | vtd_iommu_lock(s); |
1822 | vtd_as->context_cache_entry.context_cache_gen = 0; |
1823 | vtd_iommu_unlock(s); |
1824 | /* |
1825 | * Do switch address space when needed, in case if the |
1826 | * device passthrough bit is switched. |
1827 | */ |
1828 | vtd_switch_address_space(vtd_as); |
1829 | /* |
1830 | * So a device is moving out of (or moving into) a |
1831 | * domain, resync the shadow page table. |
1832 | * This won't bring bad even if we have no such |
1833 | * notifier registered - the IOMMU notification |
1834 | * framework will skip MAP notifications if that |
1835 | * happened. |
1836 | */ |
1837 | vtd_sync_shadow_page_table(vtd_as); |
1838 | } |
1839 | } |
1840 | } |
1841 | } |
1842 | |
1843 | /* Context-cache invalidation |
1844 | * Returns the Context Actual Invalidation Granularity. |
1845 | * @val: the content of the CCMD_REG |
1846 | */ |
1847 | static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val) |
1848 | { |
1849 | uint64_t caig; |
1850 | uint64_t type = val & VTD_CCMD_CIRG_MASK; |
1851 | |
1852 | switch (type) { |
1853 | case VTD_CCMD_DOMAIN_INVL: |
1854 | /* Fall through */ |
1855 | case VTD_CCMD_GLOBAL_INVL: |
1856 | caig = VTD_CCMD_GLOBAL_INVL_A; |
1857 | vtd_context_global_invalidate(s); |
1858 | break; |
1859 | |
1860 | case VTD_CCMD_DEVICE_INVL: |
1861 | caig = VTD_CCMD_DEVICE_INVL_A; |
1862 | vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val)); |
1863 | break; |
1864 | |
1865 | default: |
1866 | error_report_once("%s: invalid context: 0x%" PRIx64, |
1867 | __func__, val); |
1868 | caig = 0; |
1869 | } |
1870 | return caig; |
1871 | } |
1872 | |
1873 | static void vtd_iotlb_global_invalidate(IntelIOMMUState *s) |
1874 | { |
1875 | trace_vtd_inv_desc_iotlb_global(); |
1876 | vtd_reset_iotlb(s); |
1877 | vtd_iommu_replay_all(s); |
1878 | } |
1879 | |
1880 | static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) |
1881 | { |
1882 | VTDContextEntry ce; |
1883 | VTDAddressSpace *vtd_as; |
1884 | |
1885 | trace_vtd_inv_desc_iotlb_domain(domain_id); |
1886 | |
1887 | vtd_iommu_lock(s); |
1888 | g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain, |
1889 | &domain_id); |
1890 | vtd_iommu_unlock(s); |
1891 | |
1892 | QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { |
1893 | if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), |
1894 | vtd_as->devfn, &ce) && |
1895 | domain_id == vtd_get_domain_id(s, &ce)) { |
1896 | vtd_sync_shadow_page_table(vtd_as); |
1897 | } |
1898 | } |
1899 | } |
1900 | |
1901 | static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, |
1902 | uint16_t domain_id, hwaddr addr, |
1903 | uint8_t am) |
1904 | { |
1905 | VTDAddressSpace *vtd_as; |
1906 | VTDContextEntry ce; |
1907 | int ret; |
1908 | hwaddr size = (1 << am) * VTD_PAGE_SIZE; |
1909 | |
1910 | QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { |
1911 | ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), |
1912 | vtd_as->devfn, &ce); |
1913 | if (!ret && domain_id == vtd_get_domain_id(s, &ce)) { |
1914 | if (vtd_as_has_map_notifier(vtd_as)) { |
1915 | /* |
1916 | * As long as we have MAP notifications registered in |
1917 | * any of our IOMMU notifiers, we need to sync the |
1918 | * shadow page table. |
1919 | */ |
1920 | vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size); |
1921 | } else { |
1922 | /* |
1923 | * For UNMAP-only notifiers, we don't need to walk the |
1924 | * page tables. We just deliver the PSI down to |
1925 | * invalidate caches. |
1926 | */ |
1927 | IOMMUTLBEntry entry = { |
1928 | .target_as = &address_space_memory, |
1929 | .iova = addr, |
1930 | .translated_addr = 0, |
1931 | .addr_mask = size - 1, |
1932 | .perm = IOMMU_NONE, |
1933 | }; |
1934 | memory_region_notify_iommu(&vtd_as->iommu, 0, entry); |
1935 | } |
1936 | } |
1937 | } |
1938 | } |
1939 | |
1940 | static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id, |
1941 | hwaddr addr, uint8_t am) |
1942 | { |
1943 | VTDIOTLBPageInvInfo info; |
1944 | |
1945 | trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am); |
1946 | |
1947 | assert(am <= VTD_MAMV); |
1948 | info.domain_id = domain_id; |
1949 | info.addr = addr; |
1950 | info.mask = ~((1 << am) - 1); |
1951 | vtd_iommu_lock(s); |
1952 | g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info); |
1953 | vtd_iommu_unlock(s); |
1954 | vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am); |
1955 | } |
1956 | |
1957 | /* Flush IOTLB |
1958 | * Returns the IOTLB Actual Invalidation Granularity. |
1959 | * @val: the content of the IOTLB_REG |
1960 | */ |
1961 | static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val) |
1962 | { |
1963 | uint64_t iaig; |
1964 | uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK; |
1965 | uint16_t domain_id; |
1966 | hwaddr addr; |
1967 | uint8_t am; |
1968 | |
1969 | switch (type) { |
1970 | case VTD_TLB_GLOBAL_FLUSH: |
1971 | iaig = VTD_TLB_GLOBAL_FLUSH_A; |
1972 | vtd_iotlb_global_invalidate(s); |
1973 | break; |
1974 | |
1975 | case VTD_TLB_DSI_FLUSH: |
1976 | domain_id = VTD_TLB_DID(val); |
1977 | iaig = VTD_TLB_DSI_FLUSH_A; |
1978 | vtd_iotlb_domain_invalidate(s, domain_id); |
1979 | break; |
1980 | |
1981 | case VTD_TLB_PSI_FLUSH: |
1982 | domain_id = VTD_TLB_DID(val); |
1983 | addr = vtd_get_quad_raw(s, DMAR_IVA_REG); |
1984 | am = VTD_IVA_AM(addr); |
1985 | addr = VTD_IVA_ADDR(addr); |
1986 | if (am > VTD_MAMV) { |
1987 | error_report_once("%s: address mask overflow: 0x%" PRIx64, |
1988 | __func__, vtd_get_quad_raw(s, DMAR_IVA_REG)); |
1989 | iaig = 0; |
1990 | break; |
1991 | } |
1992 | iaig = VTD_TLB_PSI_FLUSH_A; |
1993 | vtd_iotlb_page_invalidate(s, domain_id, addr, am); |
1994 | break; |
1995 | |
1996 | default: |
1997 | error_report_once("%s: invalid granularity: 0x%" PRIx64, |
1998 | __func__, val); |
1999 | iaig = 0; |
2000 | } |
2001 | return iaig; |
2002 | } |
2003 | |
2004 | static void vtd_fetch_inv_desc(IntelIOMMUState *s); |
2005 | |
2006 | static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s) |
2007 | { |
2008 | return s->qi_enabled && (s->iq_tail == s->iq_head) && |
2009 | (s->iq_last_desc_type == VTD_INV_DESC_WAIT); |
2010 | } |
2011 | |
2012 | static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en) |
2013 | { |
2014 | uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG); |
2015 | |
2016 | trace_vtd_inv_qi_enable(en); |
2017 | |
2018 | if (en) { |
2019 | s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits); |
2020 | /* 2^(x+8) entries */ |
2021 | s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0)); |
2022 | s->qi_enabled = true; |
2023 | trace_vtd_inv_qi_setup(s->iq, s->iq_size); |
2024 | /* Ok - report back to driver */ |
2025 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES); |
2026 | |
2027 | if (s->iq_tail != 0) { |
2028 | /* |
2029 | * This is a spec violation but Windows guests are known to set up |
2030 | * Queued Invalidation this way so we allow the write and process |
2031 | * Invalidation Descriptors right away. |
2032 | */ |
2033 | trace_vtd_warn_invalid_qi_tail(s->iq_tail); |
2034 | if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { |
2035 | vtd_fetch_inv_desc(s); |
2036 | } |
2037 | } |
2038 | } else { |
2039 | if (vtd_queued_inv_disable_check(s)) { |
2040 | /* disable Queued Invalidation */ |
2041 | vtd_set_quad_raw(s, DMAR_IQH_REG, 0); |
2042 | s->iq_head = 0; |
2043 | s->qi_enabled = false; |
2044 | /* Ok - report back to driver */ |
2045 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0); |
2046 | } else { |
2047 | error_report_once("%s: detected improper state when disable QI " |
2048 | "(head=0x%x, tail=0x%x, last_type=%d)" , |
2049 | __func__, |
2050 | s->iq_head, s->iq_tail, s->iq_last_desc_type); |
2051 | } |
2052 | } |
2053 | } |
2054 | |
2055 | /* Set Root Table Pointer */ |
2056 | static void vtd_handle_gcmd_srtp(IntelIOMMUState *s) |
2057 | { |
2058 | vtd_root_table_setup(s); |
2059 | /* Ok - report back to driver */ |
2060 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS); |
2061 | vtd_reset_caches(s); |
2062 | vtd_address_space_refresh_all(s); |
2063 | } |
2064 | |
2065 | /* Set Interrupt Remap Table Pointer */ |
2066 | static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s) |
2067 | { |
2068 | vtd_interrupt_remap_table_setup(s); |
2069 | /* Ok - report back to driver */ |
2070 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS); |
2071 | } |
2072 | |
2073 | /* Handle Translation Enable/Disable */ |
2074 | static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en) |
2075 | { |
2076 | if (s->dmar_enabled == en) { |
2077 | return; |
2078 | } |
2079 | |
2080 | trace_vtd_dmar_enable(en); |
2081 | |
2082 | if (en) { |
2083 | s->dmar_enabled = true; |
2084 | /* Ok - report back to driver */ |
2085 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES); |
2086 | } else { |
2087 | s->dmar_enabled = false; |
2088 | |
2089 | /* Clear the index of Fault Recording Register */ |
2090 | s->next_frcd_reg = 0; |
2091 | /* Ok - report back to driver */ |
2092 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0); |
2093 | } |
2094 | |
2095 | vtd_reset_caches(s); |
2096 | vtd_address_space_refresh_all(s); |
2097 | } |
2098 | |
2099 | /* Handle Interrupt Remap Enable/Disable */ |
2100 | static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en) |
2101 | { |
2102 | trace_vtd_ir_enable(en); |
2103 | |
2104 | if (en) { |
2105 | s->intr_enabled = true; |
2106 | /* Ok - report back to driver */ |
2107 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES); |
2108 | } else { |
2109 | s->intr_enabled = false; |
2110 | /* Ok - report back to driver */ |
2111 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0); |
2112 | } |
2113 | } |
2114 | |
2115 | /* Handle write to Global Command Register */ |
2116 | static void vtd_handle_gcmd_write(IntelIOMMUState *s) |
2117 | { |
2118 | uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG); |
2119 | uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG); |
2120 | uint32_t changed = status ^ val; |
2121 | |
2122 | trace_vtd_reg_write_gcmd(status, val); |
2123 | if (changed & VTD_GCMD_TE) { |
2124 | /* Translation enable/disable */ |
2125 | vtd_handle_gcmd_te(s, val & VTD_GCMD_TE); |
2126 | } |
2127 | if (val & VTD_GCMD_SRTP) { |
2128 | /* Set/update the root-table pointer */ |
2129 | vtd_handle_gcmd_srtp(s); |
2130 | } |
2131 | if (changed & VTD_GCMD_QIE) { |
2132 | /* Queued Invalidation Enable */ |
2133 | vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE); |
2134 | } |
2135 | if (val & VTD_GCMD_SIRTP) { |
2136 | /* Set/update the interrupt remapping root-table pointer */ |
2137 | vtd_handle_gcmd_sirtp(s); |
2138 | } |
2139 | if (changed & VTD_GCMD_IRE) { |
2140 | /* Interrupt remap enable/disable */ |
2141 | vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE); |
2142 | } |
2143 | } |
2144 | |
2145 | /* Handle write to Context Command Register */ |
2146 | static void vtd_handle_ccmd_write(IntelIOMMUState *s) |
2147 | { |
2148 | uint64_t ret; |
2149 | uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG); |
2150 | |
2151 | /* Context-cache invalidation request */ |
2152 | if (val & VTD_CCMD_ICC) { |
2153 | if (s->qi_enabled) { |
2154 | error_report_once("Queued Invalidation enabled, " |
2155 | "should not use register-based invalidation" ); |
2156 | return; |
2157 | } |
2158 | ret = vtd_context_cache_invalidate(s, val); |
2159 | /* Invalidation completed. Change something to show */ |
2160 | vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL); |
2161 | ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK, |
2162 | ret); |
2163 | } |
2164 | } |
2165 | |
2166 | /* Handle write to IOTLB Invalidation Register */ |
2167 | static void vtd_handle_iotlb_write(IntelIOMMUState *s) |
2168 | { |
2169 | uint64_t ret; |
2170 | uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG); |
2171 | |
2172 | /* IOTLB invalidation request */ |
2173 | if (val & VTD_TLB_IVT) { |
2174 | if (s->qi_enabled) { |
2175 | error_report_once("Queued Invalidation enabled, " |
2176 | "should not use register-based invalidation" ); |
2177 | return; |
2178 | } |
2179 | ret = vtd_iotlb_flush(s, val); |
2180 | /* Invalidation completed. Change something to show */ |
2181 | vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL); |
2182 | ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, |
2183 | VTD_TLB_FLUSH_GRANU_MASK_A, ret); |
2184 | } |
2185 | } |
2186 | |
2187 | /* Fetch an Invalidation Descriptor from the Invalidation Queue */ |
2188 | static bool vtd_get_inv_desc(IntelIOMMUState *s, |
2189 | VTDInvDesc *inv_desc) |
2190 | { |
2191 | dma_addr_t base_addr = s->iq; |
2192 | uint32_t offset = s->iq_head; |
2193 | uint32_t dw = s->iq_dw ? 32 : 16; |
2194 | dma_addr_t addr = base_addr + offset * dw; |
2195 | |
2196 | if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) { |
2197 | error_report_once("Read INV DESC failed." ); |
2198 | return false; |
2199 | } |
2200 | inv_desc->lo = le64_to_cpu(inv_desc->lo); |
2201 | inv_desc->hi = le64_to_cpu(inv_desc->hi); |
2202 | if (dw == 32) { |
2203 | inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]); |
2204 | inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]); |
2205 | } |
2206 | return true; |
2207 | } |
2208 | |
2209 | static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) |
2210 | { |
2211 | if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) || |
2212 | (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) { |
2213 | error_report_once("%s: invalid wait desc: hi=%" PRIx64", lo=%" PRIx64 |
2214 | " (reserved nonzero)" , __func__, inv_desc->hi, |
2215 | inv_desc->lo); |
2216 | return false; |
2217 | } |
2218 | if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { |
2219 | /* Status Write */ |
2220 | uint32_t status_data = (uint32_t)(inv_desc->lo >> |
2221 | VTD_INV_DESC_WAIT_DATA_SHIFT); |
2222 | |
2223 | assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF)); |
2224 | |
2225 | /* FIXME: need to be masked with HAW? */ |
2226 | dma_addr_t status_addr = inv_desc->hi; |
2227 | trace_vtd_inv_desc_wait_sw(status_addr, status_data); |
2228 | status_data = cpu_to_le32(status_data); |
2229 | if (dma_memory_write(&address_space_memory, status_addr, &status_data, |
2230 | sizeof(status_data))) { |
2231 | trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); |
2232 | return false; |
2233 | } |
2234 | } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { |
2235 | /* Interrupt flag */ |
2236 | vtd_generate_completion_event(s); |
2237 | } else { |
2238 | error_report_once("%s: invalid wait desc: hi=%" PRIx64", lo=%" PRIx64 |
2239 | " (unknown type)" , __func__, inv_desc->hi, |
2240 | inv_desc->lo); |
2241 | return false; |
2242 | } |
2243 | return true; |
2244 | } |
2245 | |
2246 | static bool vtd_process_context_cache_desc(IntelIOMMUState *s, |
2247 | VTDInvDesc *inv_desc) |
2248 | { |
2249 | uint16_t sid, fmask; |
2250 | |
2251 | if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) { |
2252 | error_report_once("%s: invalid cc inv desc: hi=%" PRIx64", lo=%" PRIx64 |
2253 | " (reserved nonzero)" , __func__, inv_desc->hi, |
2254 | inv_desc->lo); |
2255 | return false; |
2256 | } |
2257 | switch (inv_desc->lo & VTD_INV_DESC_CC_G) { |
2258 | case VTD_INV_DESC_CC_DOMAIN: |
2259 | trace_vtd_inv_desc_cc_domain( |
2260 | (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo)); |
2261 | /* Fall through */ |
2262 | case VTD_INV_DESC_CC_GLOBAL: |
2263 | vtd_context_global_invalidate(s); |
2264 | break; |
2265 | |
2266 | case VTD_INV_DESC_CC_DEVICE: |
2267 | sid = VTD_INV_DESC_CC_SID(inv_desc->lo); |
2268 | fmask = VTD_INV_DESC_CC_FM(inv_desc->lo); |
2269 | vtd_context_device_invalidate(s, sid, fmask); |
2270 | break; |
2271 | |
2272 | default: |
2273 | error_report_once("%s: invalid cc inv desc: hi=%" PRIx64", lo=%" PRIx64 |
2274 | " (invalid type)" , __func__, inv_desc->hi, |
2275 | inv_desc->lo); |
2276 | return false; |
2277 | } |
2278 | return true; |
2279 | } |
2280 | |
2281 | static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) |
2282 | { |
2283 | uint16_t domain_id; |
2284 | uint8_t am; |
2285 | hwaddr addr; |
2286 | |
2287 | if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) || |
2288 | (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) { |
2289 | error_report_once("%s: invalid iotlb inv desc: hi=0x%" PRIx64 |
2290 | ", lo=0x%" PRIx64" (reserved bits unzero)\n" , |
2291 | __func__, inv_desc->hi, inv_desc->lo); |
2292 | return false; |
2293 | } |
2294 | |
2295 | switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) { |
2296 | case VTD_INV_DESC_IOTLB_GLOBAL: |
2297 | vtd_iotlb_global_invalidate(s); |
2298 | break; |
2299 | |
2300 | case VTD_INV_DESC_IOTLB_DOMAIN: |
2301 | domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); |
2302 | vtd_iotlb_domain_invalidate(s, domain_id); |
2303 | break; |
2304 | |
2305 | case VTD_INV_DESC_IOTLB_PAGE: |
2306 | domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); |
2307 | addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi); |
2308 | am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi); |
2309 | if (am > VTD_MAMV) { |
2310 | error_report_once("%s: invalid iotlb inv desc: hi=0x%" PRIx64 |
2311 | ", lo=0x%" PRIx64" (am=%u > VTD_MAMV=%u)\n" , |
2312 | __func__, inv_desc->hi, inv_desc->lo, |
2313 | am, (unsigned)VTD_MAMV); |
2314 | return false; |
2315 | } |
2316 | vtd_iotlb_page_invalidate(s, domain_id, addr, am); |
2317 | break; |
2318 | |
2319 | default: |
2320 | error_report_once("%s: invalid iotlb inv desc: hi=0x%" PRIx64 |
2321 | ", lo=0x%" PRIx64" (type mismatch: 0x%llx)\n" , |
2322 | __func__, inv_desc->hi, inv_desc->lo, |
2323 | inv_desc->lo & VTD_INV_DESC_IOTLB_G); |
2324 | return false; |
2325 | } |
2326 | return true; |
2327 | } |
2328 | |
2329 | static bool vtd_process_inv_iec_desc(IntelIOMMUState *s, |
2330 | VTDInvDesc *inv_desc) |
2331 | { |
2332 | trace_vtd_inv_desc_iec(inv_desc->iec.granularity, |
2333 | inv_desc->iec.index, |
2334 | inv_desc->iec.index_mask); |
2335 | |
2336 | vtd_iec_notify_all(s, !inv_desc->iec.granularity, |
2337 | inv_desc->iec.index, |
2338 | inv_desc->iec.index_mask); |
2339 | return true; |
2340 | } |
2341 | |
2342 | static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, |
2343 | VTDInvDesc *inv_desc) |
2344 | { |
2345 | VTDAddressSpace *vtd_dev_as; |
2346 | IOMMUTLBEntry entry; |
2347 | struct VTDBus *vtd_bus; |
2348 | hwaddr addr; |
2349 | uint64_t sz; |
2350 | uint16_t sid; |
2351 | uint8_t devfn; |
2352 | bool size; |
2353 | uint8_t bus_num; |
2354 | |
2355 | addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); |
2356 | sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); |
2357 | devfn = sid & 0xff; |
2358 | bus_num = sid >> 8; |
2359 | size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); |
2360 | |
2361 | if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) || |
2362 | (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) { |
2363 | error_report_once("%s: invalid dev-iotlb inv desc: hi=%" PRIx64 |
2364 | ", lo=%" PRIx64" (reserved nonzero)" , __func__, |
2365 | inv_desc->hi, inv_desc->lo); |
2366 | return false; |
2367 | } |
2368 | |
2369 | vtd_bus = vtd_find_as_from_bus_num(s, bus_num); |
2370 | if (!vtd_bus) { |
2371 | goto done; |
2372 | } |
2373 | |
2374 | vtd_dev_as = vtd_bus->dev_as[devfn]; |
2375 | if (!vtd_dev_as) { |
2376 | goto done; |
2377 | } |
2378 | |
2379 | /* According to ATS spec table 2.4: |
2380 | * S = 0, bits 15:12 = xxxx range size: 4K |
2381 | * S = 1, bits 15:12 = xxx0 range size: 8K |
2382 | * S = 1, bits 15:12 = xx01 range size: 16K |
2383 | * S = 1, bits 15:12 = x011 range size: 32K |
2384 | * S = 1, bits 15:12 = 0111 range size: 64K |
2385 | * ... |
2386 | */ |
2387 | if (size) { |
2388 | sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT); |
2389 | addr &= ~(sz - 1); |
2390 | } else { |
2391 | sz = VTD_PAGE_SIZE; |
2392 | } |
2393 | |
2394 | entry.target_as = &vtd_dev_as->as; |
2395 | entry.addr_mask = sz - 1; |
2396 | entry.iova = addr; |
2397 | entry.perm = IOMMU_NONE; |
2398 | entry.translated_addr = 0; |
2399 | memory_region_notify_iommu(&vtd_dev_as->iommu, 0, entry); |
2400 | |
2401 | done: |
2402 | return true; |
2403 | } |
2404 | |
2405 | static bool vtd_process_inv_desc(IntelIOMMUState *s) |
2406 | { |
2407 | VTDInvDesc inv_desc; |
2408 | uint8_t desc_type; |
2409 | |
2410 | trace_vtd_inv_qi_head(s->iq_head); |
2411 | if (!vtd_get_inv_desc(s, &inv_desc)) { |
2412 | s->iq_last_desc_type = VTD_INV_DESC_NONE; |
2413 | return false; |
2414 | } |
2415 | |
2416 | desc_type = inv_desc.lo & VTD_INV_DESC_TYPE; |
2417 | /* FIXME: should update at first or at last? */ |
2418 | s->iq_last_desc_type = desc_type; |
2419 | |
2420 | switch (desc_type) { |
2421 | case VTD_INV_DESC_CC: |
2422 | trace_vtd_inv_desc("context-cache" , inv_desc.hi, inv_desc.lo); |
2423 | if (!vtd_process_context_cache_desc(s, &inv_desc)) { |
2424 | return false; |
2425 | } |
2426 | break; |
2427 | |
2428 | case VTD_INV_DESC_IOTLB: |
2429 | trace_vtd_inv_desc("iotlb" , inv_desc.hi, inv_desc.lo); |
2430 | if (!vtd_process_iotlb_desc(s, &inv_desc)) { |
2431 | return false; |
2432 | } |
2433 | break; |
2434 | |
2435 | /* |
2436 | * TODO: the entity of below two cases will be implemented in future series. |
2437 | * To make guest (which integrates scalable mode support patch set in |
2438 | * iommu driver) work, just return true is enough so far. |
2439 | */ |
2440 | case VTD_INV_DESC_PC: |
2441 | break; |
2442 | |
2443 | case VTD_INV_DESC_PIOTLB: |
2444 | break; |
2445 | |
2446 | case VTD_INV_DESC_WAIT: |
2447 | trace_vtd_inv_desc("wait" , inv_desc.hi, inv_desc.lo); |
2448 | if (!vtd_process_wait_desc(s, &inv_desc)) { |
2449 | return false; |
2450 | } |
2451 | break; |
2452 | |
2453 | case VTD_INV_DESC_IEC: |
2454 | trace_vtd_inv_desc("iec" , inv_desc.hi, inv_desc.lo); |
2455 | if (!vtd_process_inv_iec_desc(s, &inv_desc)) { |
2456 | return false; |
2457 | } |
2458 | break; |
2459 | |
2460 | case VTD_INV_DESC_DEVICE: |
2461 | trace_vtd_inv_desc("device" , inv_desc.hi, inv_desc.lo); |
2462 | if (!vtd_process_device_iotlb_desc(s, &inv_desc)) { |
2463 | return false; |
2464 | } |
2465 | break; |
2466 | |
2467 | default: |
2468 | error_report_once("%s: invalid inv desc: hi=%" PRIx64", lo=%" PRIx64 |
2469 | " (unknown type)" , __func__, inv_desc.hi, |
2470 | inv_desc.lo); |
2471 | return false; |
2472 | } |
2473 | s->iq_head++; |
2474 | if (s->iq_head == s->iq_size) { |
2475 | s->iq_head = 0; |
2476 | } |
2477 | return true; |
2478 | } |
2479 | |
2480 | /* Try to fetch and process more Invalidation Descriptors */ |
2481 | static void vtd_fetch_inv_desc(IntelIOMMUState *s) |
2482 | { |
2483 | trace_vtd_inv_qi_fetch(); |
2484 | |
2485 | if (s->iq_tail >= s->iq_size) { |
2486 | /* Detects an invalid Tail pointer */ |
2487 | error_report_once("%s: detected invalid QI tail " |
2488 | "(tail=0x%x, size=0x%x)" , |
2489 | __func__, s->iq_tail, s->iq_size); |
2490 | vtd_handle_inv_queue_error(s); |
2491 | return; |
2492 | } |
2493 | while (s->iq_head != s->iq_tail) { |
2494 | if (!vtd_process_inv_desc(s)) { |
2495 | /* Invalidation Queue Errors */ |
2496 | vtd_handle_inv_queue_error(s); |
2497 | break; |
2498 | } |
2499 | /* Must update the IQH_REG in time */ |
2500 | vtd_set_quad_raw(s, DMAR_IQH_REG, |
2501 | (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) & |
2502 | VTD_IQH_QH_MASK); |
2503 | } |
2504 | } |
2505 | |
2506 | /* Handle write to Invalidation Queue Tail Register */ |
2507 | static void vtd_handle_iqt_write(IntelIOMMUState *s) |
2508 | { |
2509 | uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG); |
2510 | |
2511 | if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { |
2512 | error_report_once("%s: RSV bit is set: val=0x%" PRIx64, |
2513 | __func__, val); |
2514 | return; |
2515 | } |
2516 | s->iq_tail = VTD_IQT_QT(s->iq_dw, val); |
2517 | trace_vtd_inv_qi_tail(s->iq_tail); |
2518 | |
2519 | if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { |
2520 | /* Process Invalidation Queue here */ |
2521 | vtd_fetch_inv_desc(s); |
2522 | } |
2523 | } |
2524 | |
2525 | static void vtd_handle_fsts_write(IntelIOMMUState *s) |
2526 | { |
2527 | uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); |
2528 | uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG); |
2529 | uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE; |
2530 | |
2531 | if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) { |
2532 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); |
2533 | trace_vtd_fsts_clear_ip(); |
2534 | } |
2535 | /* FIXME: when IQE is Clear, should we try to fetch some Invalidation |
2536 | * Descriptors if there are any when Queued Invalidation is enabled? |
2537 | */ |
2538 | } |
2539 | |
2540 | static void vtd_handle_fectl_write(IntelIOMMUState *s) |
2541 | { |
2542 | uint32_t fectl_reg; |
2543 | /* FIXME: when software clears the IM field, check the IP field. But do we |
2544 | * need to compare the old value and the new value to conclude that |
2545 | * software clears the IM field? Or just check if the IM field is zero? |
2546 | */ |
2547 | fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG); |
2548 | |
2549 | trace_vtd_reg_write_fectl(fectl_reg); |
2550 | |
2551 | if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) { |
2552 | vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG); |
2553 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); |
2554 | } |
2555 | } |
2556 | |
2557 | static void vtd_handle_ics_write(IntelIOMMUState *s) |
2558 | { |
2559 | uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG); |
2560 | uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG); |
2561 | |
2562 | if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) { |
2563 | trace_vtd_reg_ics_clear_ip(); |
2564 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); |
2565 | } |
2566 | } |
2567 | |
2568 | static void vtd_handle_iectl_write(IntelIOMMUState *s) |
2569 | { |
2570 | uint32_t iectl_reg; |
2571 | /* FIXME: when software clears the IM field, check the IP field. But do we |
2572 | * need to compare the old value and the new value to conclude that |
2573 | * software clears the IM field? Or just check if the IM field is zero? |
2574 | */ |
2575 | iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG); |
2576 | |
2577 | trace_vtd_reg_write_iectl(iectl_reg); |
2578 | |
2579 | if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) { |
2580 | vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG); |
2581 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); |
2582 | } |
2583 | } |
2584 | |
2585 | static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size) |
2586 | { |
2587 | IntelIOMMUState *s = opaque; |
2588 | uint64_t val; |
2589 | |
2590 | trace_vtd_reg_read(addr, size); |
2591 | |
2592 | if (addr + size > DMAR_REG_SIZE) { |
2593 | error_report_once("%s: MMIO over range: addr=0x%" PRIx64 |
2594 | " size=0x%u" , __func__, addr, size); |
2595 | return (uint64_t)-1; |
2596 | } |
2597 | |
2598 | switch (addr) { |
2599 | /* Root Table Address Register, 64-bit */ |
2600 | case DMAR_RTADDR_REG: |
2601 | if (size == 4) { |
2602 | val = s->root & ((1ULL << 32) - 1); |
2603 | } else { |
2604 | val = s->root; |
2605 | } |
2606 | break; |
2607 | |
2608 | case DMAR_RTADDR_REG_HI: |
2609 | assert(size == 4); |
2610 | val = s->root >> 32; |
2611 | break; |
2612 | |
2613 | /* Invalidation Queue Address Register, 64-bit */ |
2614 | case DMAR_IQA_REG: |
2615 | val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS); |
2616 | if (size == 4) { |
2617 | val = val & ((1ULL << 32) - 1); |
2618 | } |
2619 | break; |
2620 | |
2621 | case DMAR_IQA_REG_HI: |
2622 | assert(size == 4); |
2623 | val = s->iq >> 32; |
2624 | break; |
2625 | |
2626 | default: |
2627 | if (size == 4) { |
2628 | val = vtd_get_long(s, addr); |
2629 | } else { |
2630 | val = vtd_get_quad(s, addr); |
2631 | } |
2632 | } |
2633 | |
2634 | return val; |
2635 | } |
2636 | |
2637 | static void vtd_mem_write(void *opaque, hwaddr addr, |
2638 | uint64_t val, unsigned size) |
2639 | { |
2640 | IntelIOMMUState *s = opaque; |
2641 | |
2642 | trace_vtd_reg_write(addr, size, val); |
2643 | |
2644 | if (addr + size > DMAR_REG_SIZE) { |
2645 | error_report_once("%s: MMIO over range: addr=0x%" PRIx64 |
2646 | " size=0x%u" , __func__, addr, size); |
2647 | return; |
2648 | } |
2649 | |
2650 | switch (addr) { |
2651 | /* Global Command Register, 32-bit */ |
2652 | case DMAR_GCMD_REG: |
2653 | vtd_set_long(s, addr, val); |
2654 | vtd_handle_gcmd_write(s); |
2655 | break; |
2656 | |
2657 | /* Context Command Register, 64-bit */ |
2658 | case DMAR_CCMD_REG: |
2659 | if (size == 4) { |
2660 | vtd_set_long(s, addr, val); |
2661 | } else { |
2662 | vtd_set_quad(s, addr, val); |
2663 | vtd_handle_ccmd_write(s); |
2664 | } |
2665 | break; |
2666 | |
2667 | case DMAR_CCMD_REG_HI: |
2668 | assert(size == 4); |
2669 | vtd_set_long(s, addr, val); |
2670 | vtd_handle_ccmd_write(s); |
2671 | break; |
2672 | |
2673 | /* IOTLB Invalidation Register, 64-bit */ |
2674 | case DMAR_IOTLB_REG: |
2675 | if (size == 4) { |
2676 | vtd_set_long(s, addr, val); |
2677 | } else { |
2678 | vtd_set_quad(s, addr, val); |
2679 | vtd_handle_iotlb_write(s); |
2680 | } |
2681 | break; |
2682 | |
2683 | case DMAR_IOTLB_REG_HI: |
2684 | assert(size == 4); |
2685 | vtd_set_long(s, addr, val); |
2686 | vtd_handle_iotlb_write(s); |
2687 | break; |
2688 | |
2689 | /* Invalidate Address Register, 64-bit */ |
2690 | case DMAR_IVA_REG: |
2691 | if (size == 4) { |
2692 | vtd_set_long(s, addr, val); |
2693 | } else { |
2694 | vtd_set_quad(s, addr, val); |
2695 | } |
2696 | break; |
2697 | |
2698 | case DMAR_IVA_REG_HI: |
2699 | assert(size == 4); |
2700 | vtd_set_long(s, addr, val); |
2701 | break; |
2702 | |
2703 | /* Fault Status Register, 32-bit */ |
2704 | case DMAR_FSTS_REG: |
2705 | assert(size == 4); |
2706 | vtd_set_long(s, addr, val); |
2707 | vtd_handle_fsts_write(s); |
2708 | break; |
2709 | |
2710 | /* Fault Event Control Register, 32-bit */ |
2711 | case DMAR_FECTL_REG: |
2712 | assert(size == 4); |
2713 | vtd_set_long(s, addr, val); |
2714 | vtd_handle_fectl_write(s); |
2715 | break; |
2716 | |
2717 | /* Fault Event Data Register, 32-bit */ |
2718 | case DMAR_FEDATA_REG: |
2719 | assert(size == 4); |
2720 | vtd_set_long(s, addr, val); |
2721 | break; |
2722 | |
2723 | /* Fault Event Address Register, 32-bit */ |
2724 | case DMAR_FEADDR_REG: |
2725 | if (size == 4) { |
2726 | vtd_set_long(s, addr, val); |
2727 | } else { |
2728 | /* |
2729 | * While the register is 32-bit only, some guests (Xen...) write to |
2730 | * it with 64-bit. |
2731 | */ |
2732 | vtd_set_quad(s, addr, val); |
2733 | } |
2734 | break; |
2735 | |
2736 | /* Fault Event Upper Address Register, 32-bit */ |
2737 | case DMAR_FEUADDR_REG: |
2738 | assert(size == 4); |
2739 | vtd_set_long(s, addr, val); |
2740 | break; |
2741 | |
2742 | /* Protected Memory Enable Register, 32-bit */ |
2743 | case DMAR_PMEN_REG: |
2744 | assert(size == 4); |
2745 | vtd_set_long(s, addr, val); |
2746 | break; |
2747 | |
2748 | /* Root Table Address Register, 64-bit */ |
2749 | case DMAR_RTADDR_REG: |
2750 | if (size == 4) { |
2751 | vtd_set_long(s, addr, val); |
2752 | } else { |
2753 | vtd_set_quad(s, addr, val); |
2754 | } |
2755 | break; |
2756 | |
2757 | case DMAR_RTADDR_REG_HI: |
2758 | assert(size == 4); |
2759 | vtd_set_long(s, addr, val); |
2760 | break; |
2761 | |
2762 | /* Invalidation Queue Tail Register, 64-bit */ |
2763 | case DMAR_IQT_REG: |
2764 | if (size == 4) { |
2765 | vtd_set_long(s, addr, val); |
2766 | } else { |
2767 | vtd_set_quad(s, addr, val); |
2768 | } |
2769 | vtd_handle_iqt_write(s); |
2770 | break; |
2771 | |
2772 | case DMAR_IQT_REG_HI: |
2773 | assert(size == 4); |
2774 | vtd_set_long(s, addr, val); |
2775 | /* 19:63 of IQT_REG is RsvdZ, do nothing here */ |
2776 | break; |
2777 | |
2778 | /* Invalidation Queue Address Register, 64-bit */ |
2779 | case DMAR_IQA_REG: |
2780 | if (size == 4) { |
2781 | vtd_set_long(s, addr, val); |
2782 | } else { |
2783 | vtd_set_quad(s, addr, val); |
2784 | } |
2785 | if (s->ecap & VTD_ECAP_SMTS && |
2786 | val & VTD_IQA_DW_MASK) { |
2787 | s->iq_dw = true; |
2788 | } else { |
2789 | s->iq_dw = false; |
2790 | } |
2791 | break; |
2792 | |
2793 | case DMAR_IQA_REG_HI: |
2794 | assert(size == 4); |
2795 | vtd_set_long(s, addr, val); |
2796 | break; |
2797 | |
2798 | /* Invalidation Completion Status Register, 32-bit */ |
2799 | case DMAR_ICS_REG: |
2800 | assert(size == 4); |
2801 | vtd_set_long(s, addr, val); |
2802 | vtd_handle_ics_write(s); |
2803 | break; |
2804 | |
2805 | /* Invalidation Event Control Register, 32-bit */ |
2806 | case DMAR_IECTL_REG: |
2807 | assert(size == 4); |
2808 | vtd_set_long(s, addr, val); |
2809 | vtd_handle_iectl_write(s); |
2810 | break; |
2811 | |
2812 | /* Invalidation Event Data Register, 32-bit */ |
2813 | case DMAR_IEDATA_REG: |
2814 | assert(size == 4); |
2815 | vtd_set_long(s, addr, val); |
2816 | break; |
2817 | |
2818 | /* Invalidation Event Address Register, 32-bit */ |
2819 | case DMAR_IEADDR_REG: |
2820 | assert(size == 4); |
2821 | vtd_set_long(s, addr, val); |
2822 | break; |
2823 | |
2824 | /* Invalidation Event Upper Address Register, 32-bit */ |
2825 | case DMAR_IEUADDR_REG: |
2826 | assert(size == 4); |
2827 | vtd_set_long(s, addr, val); |
2828 | break; |
2829 | |
2830 | /* Fault Recording Registers, 128-bit */ |
2831 | case DMAR_FRCD_REG_0_0: |
2832 | if (size == 4) { |
2833 | vtd_set_long(s, addr, val); |
2834 | } else { |
2835 | vtd_set_quad(s, addr, val); |
2836 | } |
2837 | break; |
2838 | |
2839 | case DMAR_FRCD_REG_0_1: |
2840 | assert(size == 4); |
2841 | vtd_set_long(s, addr, val); |
2842 | break; |
2843 | |
2844 | case DMAR_FRCD_REG_0_2: |
2845 | if (size == 4) { |
2846 | vtd_set_long(s, addr, val); |
2847 | } else { |
2848 | vtd_set_quad(s, addr, val); |
2849 | /* May clear bit 127 (Fault), update PPF */ |
2850 | vtd_update_fsts_ppf(s); |
2851 | } |
2852 | break; |
2853 | |
2854 | case DMAR_FRCD_REG_0_3: |
2855 | assert(size == 4); |
2856 | vtd_set_long(s, addr, val); |
2857 | /* May clear bit 127 (Fault), update PPF */ |
2858 | vtd_update_fsts_ppf(s); |
2859 | break; |
2860 | |
2861 | case DMAR_IRTA_REG: |
2862 | if (size == 4) { |
2863 | vtd_set_long(s, addr, val); |
2864 | } else { |
2865 | vtd_set_quad(s, addr, val); |
2866 | } |
2867 | break; |
2868 | |
2869 | case DMAR_IRTA_REG_HI: |
2870 | assert(size == 4); |
2871 | vtd_set_long(s, addr, val); |
2872 | break; |
2873 | |
2874 | default: |
2875 | if (size == 4) { |
2876 | vtd_set_long(s, addr, val); |
2877 | } else { |
2878 | vtd_set_quad(s, addr, val); |
2879 | } |
2880 | } |
2881 | } |
2882 | |
2883 | static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr, |
2884 | IOMMUAccessFlags flag, int iommu_idx) |
2885 | { |
2886 | VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); |
2887 | IntelIOMMUState *s = vtd_as->iommu_state; |
2888 | IOMMUTLBEntry iotlb = { |
2889 | /* We'll fill in the rest later. */ |
2890 | .target_as = &address_space_memory, |
2891 | }; |
2892 | bool success; |
2893 | |
2894 | if (likely(s->dmar_enabled)) { |
2895 | success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, |
2896 | addr, flag & IOMMU_WO, &iotlb); |
2897 | } else { |
2898 | /* DMAR disabled, passthrough, use 4k-page*/ |
2899 | iotlb.iova = addr & VTD_PAGE_MASK_4K; |
2900 | iotlb.translated_addr = addr & VTD_PAGE_MASK_4K; |
2901 | iotlb.addr_mask = ~VTD_PAGE_MASK_4K; |
2902 | iotlb.perm = IOMMU_RW; |
2903 | success = true; |
2904 | } |
2905 | |
2906 | if (likely(success)) { |
2907 | trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus), |
2908 | VTD_PCI_SLOT(vtd_as->devfn), |
2909 | VTD_PCI_FUNC(vtd_as->devfn), |
2910 | iotlb.iova, iotlb.translated_addr, |
2911 | iotlb.addr_mask); |
2912 | } else { |
2913 | error_report_once("%s: detected translation failure " |
2914 | "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")" , |
2915 | __func__, pci_bus_num(vtd_as->bus), |
2916 | VTD_PCI_SLOT(vtd_as->devfn), |
2917 | VTD_PCI_FUNC(vtd_as->devfn), |
2918 | addr); |
2919 | } |
2920 | |
2921 | return iotlb; |
2922 | } |
2923 | |
2924 | static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, |
2925 | IOMMUNotifierFlag old, |
2926 | IOMMUNotifierFlag new) |
2927 | { |
2928 | VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); |
2929 | IntelIOMMUState *s = vtd_as->iommu_state; |
2930 | |
2931 | if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) { |
2932 | error_report("We need to set caching-mode=on for intel-iommu to enable " |
2933 | "device assignment with IOMMU protection." ); |
2934 | exit(1); |
2935 | } |
2936 | |
2937 | /* Update per-address-space notifier flags */ |
2938 | vtd_as->notifier_flags = new; |
2939 | |
2940 | if (old == IOMMU_NOTIFIER_NONE) { |
2941 | QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next); |
2942 | } else if (new == IOMMU_NOTIFIER_NONE) { |
2943 | QLIST_REMOVE(vtd_as, next); |
2944 | } |
2945 | } |
2946 | |
2947 | static int vtd_post_load(void *opaque, int version_id) |
2948 | { |
2949 | IntelIOMMUState *iommu = opaque; |
2950 | |
2951 | /* |
2952 | * Memory regions are dynamically turned on/off depending on |
2953 | * context entry configurations from the guest. After migration, |
2954 | * we need to make sure the memory regions are still correct. |
2955 | */ |
2956 | vtd_switch_address_space_all(iommu); |
2957 | |
2958 | /* |
2959 | * We don't need to migrate the root_scalable because we can |
2960 | * simply do the calculation after the loading is complete. We |
2961 | * can actually do similar things with root, dmar_enabled, etc. |
2962 | * however since we've had them already so we'd better keep them |
2963 | * for compatibility of migration. |
2964 | */ |
2965 | vtd_update_scalable_state(iommu); |
2966 | |
2967 | return 0; |
2968 | } |
2969 | |
2970 | static const VMStateDescription vtd_vmstate = { |
2971 | .name = "iommu-intel" , |
2972 | .version_id = 1, |
2973 | .minimum_version_id = 1, |
2974 | .priority = MIG_PRI_IOMMU, |
2975 | .post_load = vtd_post_load, |
2976 | .fields = (VMStateField[]) { |
2977 | VMSTATE_UINT64(root, IntelIOMMUState), |
2978 | VMSTATE_UINT64(intr_root, IntelIOMMUState), |
2979 | VMSTATE_UINT64(iq, IntelIOMMUState), |
2980 | VMSTATE_UINT32(intr_size, IntelIOMMUState), |
2981 | VMSTATE_UINT16(iq_head, IntelIOMMUState), |
2982 | VMSTATE_UINT16(iq_tail, IntelIOMMUState), |
2983 | VMSTATE_UINT16(iq_size, IntelIOMMUState), |
2984 | VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState), |
2985 | VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE), |
2986 | VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState), |
2987 | VMSTATE_UNUSED(1), /* bool root_extended is obsolete by VT-d */ |
2988 | VMSTATE_BOOL(dmar_enabled, IntelIOMMUState), |
2989 | VMSTATE_BOOL(qi_enabled, IntelIOMMUState), |
2990 | VMSTATE_BOOL(intr_enabled, IntelIOMMUState), |
2991 | VMSTATE_BOOL(intr_eime, IntelIOMMUState), |
2992 | VMSTATE_END_OF_LIST() |
2993 | } |
2994 | }; |
2995 | |
2996 | static const MemoryRegionOps vtd_mem_ops = { |
2997 | .read = vtd_mem_read, |
2998 | .write = vtd_mem_write, |
2999 | .endianness = DEVICE_LITTLE_ENDIAN, |
3000 | .impl = { |
3001 | .min_access_size = 4, |
3002 | .max_access_size = 8, |
3003 | }, |
3004 | .valid = { |
3005 | .min_access_size = 4, |
3006 | .max_access_size = 8, |
3007 | }, |
3008 | }; |
3009 | |
3010 | static Property vtd_properties[] = { |
3011 | DEFINE_PROP_UINT32("version" , IntelIOMMUState, version, 0), |
3012 | DEFINE_PROP_ON_OFF_AUTO("eim" , IntelIOMMUState, intr_eim, |
3013 | ON_OFF_AUTO_AUTO), |
3014 | DEFINE_PROP_BOOL("x-buggy-eim" , IntelIOMMUState, buggy_eim, false), |
3015 | DEFINE_PROP_UINT8("aw-bits" , IntelIOMMUState, aw_bits, |
3016 | VTD_HOST_ADDRESS_WIDTH), |
3017 | DEFINE_PROP_BOOL("caching-mode" , IntelIOMMUState, caching_mode, FALSE), |
3018 | DEFINE_PROP_BOOL("x-scalable-mode" , IntelIOMMUState, scalable_mode, FALSE), |
3019 | DEFINE_PROP_BOOL("dma-drain" , IntelIOMMUState, dma_drain, true), |
3020 | DEFINE_PROP_END_OF_LIST(), |
3021 | }; |
3022 | |
3023 | /* Read IRTE entry with specific index */ |
3024 | static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index, |
3025 | VTD_IR_TableEntry *entry, uint16_t sid) |
3026 | { |
3027 | static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \ |
3028 | {0xffff, 0xfffb, 0xfff9, 0xfff8}; |
3029 | dma_addr_t addr = 0x00; |
3030 | uint16_t mask, source_id; |
3031 | uint8_t bus, bus_max, bus_min; |
3032 | |
3033 | addr = iommu->intr_root + index * sizeof(*entry); |
3034 | if (dma_memory_read(&address_space_memory, addr, entry, |
3035 | sizeof(*entry))) { |
3036 | error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64, |
3037 | __func__, index, addr); |
3038 | return -VTD_FR_IR_ROOT_INVAL; |
3039 | } |
3040 | |
3041 | trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]), |
3042 | le64_to_cpu(entry->data[0])); |
3043 | |
3044 | if (!entry->irte.present) { |
3045 | error_report_once("%s: detected non-present IRTE " |
3046 | "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")" , |
3047 | __func__, index, le64_to_cpu(entry->data[1]), |
3048 | le64_to_cpu(entry->data[0])); |
3049 | return -VTD_FR_IR_ENTRY_P; |
3050 | } |
3051 | |
3052 | if (entry->irte.__reserved_0 || entry->irte.__reserved_1 || |
3053 | entry->irte.__reserved_2) { |
3054 | error_report_once("%s: detected non-zero reserved IRTE " |
3055 | "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")" , |
3056 | __func__, index, le64_to_cpu(entry->data[1]), |
3057 | le64_to_cpu(entry->data[0])); |
3058 | return -VTD_FR_IR_IRTE_RSVD; |
3059 | } |
3060 | |
3061 | if (sid != X86_IOMMU_SID_INVALID) { |
3062 | /* Validate IRTE SID */ |
3063 | source_id = le32_to_cpu(entry->irte.source_id); |
3064 | switch (entry->irte.sid_vtype) { |
3065 | case VTD_SVT_NONE: |
3066 | break; |
3067 | |
3068 | case VTD_SVT_ALL: |
3069 | mask = vtd_svt_mask[entry->irte.sid_q]; |
3070 | if ((source_id & mask) != (sid & mask)) { |
3071 | error_report_once("%s: invalid IRTE SID " |
3072 | "(index=%u, sid=%u, source_id=%u)" , |
3073 | __func__, index, sid, source_id); |
3074 | return -VTD_FR_IR_SID_ERR; |
3075 | } |
3076 | break; |
3077 | |
3078 | case VTD_SVT_BUS: |
3079 | bus_max = source_id >> 8; |
3080 | bus_min = source_id & 0xff; |
3081 | bus = sid >> 8; |
3082 | if (bus > bus_max || bus < bus_min) { |
3083 | error_report_once("%s: invalid SVT_BUS " |
3084 | "(index=%u, bus=%u, min=%u, max=%u)" , |
3085 | __func__, index, bus, bus_min, bus_max); |
3086 | return -VTD_FR_IR_SID_ERR; |
3087 | } |
3088 | break; |
3089 | |
3090 | default: |
3091 | error_report_once("%s: detected invalid IRTE SVT " |
3092 | "(index=%u, type=%d)" , __func__, |
3093 | index, entry->irte.sid_vtype); |
3094 | /* Take this as verification failure. */ |
3095 | return -VTD_FR_IR_SID_ERR; |
3096 | break; |
3097 | } |
3098 | } |
3099 | |
3100 | return 0; |
3101 | } |
3102 | |
3103 | /* Fetch IRQ information of specific IR index */ |
3104 | static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index, |
3105 | X86IOMMUIrq *irq, uint16_t sid) |
3106 | { |
3107 | VTD_IR_TableEntry irte = {}; |
3108 | int ret = 0; |
3109 | |
3110 | ret = vtd_irte_get(iommu, index, &irte, sid); |
3111 | if (ret) { |
3112 | return ret; |
3113 | } |
3114 | |
3115 | irq->trigger_mode = irte.irte.trigger_mode; |
3116 | irq->vector = irte.irte.vector; |
3117 | irq->delivery_mode = irte.irte.delivery_mode; |
3118 | irq->dest = le32_to_cpu(irte.irte.dest_id); |
3119 | if (!iommu->intr_eime) { |
3120 | #define VTD_IR_APIC_DEST_MASK (0xff00ULL) |
3121 | #define VTD_IR_APIC_DEST_SHIFT (8) |
3122 | irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >> |
3123 | VTD_IR_APIC_DEST_SHIFT; |
3124 | } |
3125 | irq->dest_mode = irte.irte.dest_mode; |
3126 | irq->redir_hint = irte.irte.redir_hint; |
3127 | |
3128 | trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector, |
3129 | irq->delivery_mode, irq->dest, irq->dest_mode); |
3130 | |
3131 | return 0; |
3132 | } |
3133 | |
3134 | /* Interrupt remapping for MSI/MSI-X entry */ |
3135 | static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu, |
3136 | MSIMessage *origin, |
3137 | MSIMessage *translated, |
3138 | uint16_t sid) |
3139 | { |
3140 | int ret = 0; |
3141 | VTD_IR_MSIAddress addr; |
3142 | uint16_t index; |
3143 | X86IOMMUIrq irq = {}; |
3144 | |
3145 | assert(origin && translated); |
3146 | |
3147 | trace_vtd_ir_remap_msi_req(origin->address, origin->data); |
3148 | |
3149 | if (!iommu || !iommu->intr_enabled) { |
3150 | memcpy(translated, origin, sizeof(*origin)); |
3151 | goto out; |
3152 | } |
3153 | |
3154 | if (origin->address & VTD_MSI_ADDR_HI_MASK) { |
3155 | error_report_once("%s: MSI address high 32 bits non-zero detected: " |
3156 | "address=0x%" PRIx64, __func__, origin->address); |
3157 | return -VTD_FR_IR_REQ_RSVD; |
3158 | } |
3159 | |
3160 | addr.data = origin->address & VTD_MSI_ADDR_LO_MASK; |
3161 | if (addr.addr.__head != 0xfee) { |
3162 | error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32, |
3163 | __func__, addr.data); |
3164 | return -VTD_FR_IR_REQ_RSVD; |
3165 | } |
3166 | |
3167 | /* This is compatible mode. */ |
3168 | if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) { |
3169 | memcpy(translated, origin, sizeof(*origin)); |
3170 | goto out; |
3171 | } |
3172 | |
3173 | index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l); |
3174 | |
3175 | #define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff) |
3176 | #define VTD_IR_MSI_DATA_RESERVED (0xffff0000) |
3177 | |
3178 | if (addr.addr.sub_valid) { |
3179 | /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */ |
3180 | index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE; |
3181 | } |
3182 | |
3183 | ret = vtd_remap_irq_get(iommu, index, &irq, sid); |
3184 | if (ret) { |
3185 | return ret; |
3186 | } |
3187 | |
3188 | if (addr.addr.sub_valid) { |
3189 | trace_vtd_ir_remap_type("MSI" ); |
3190 | if (origin->data & VTD_IR_MSI_DATA_RESERVED) { |
3191 | error_report_once("%s: invalid IR MSI " |
3192 | "(sid=%u, address=0x%" PRIx64 |
3193 | ", data=0x%" PRIx32 ")" , |
3194 | __func__, sid, origin->address, origin->data); |
3195 | return -VTD_FR_IR_REQ_RSVD; |
3196 | } |
3197 | } else { |
3198 | uint8_t vector = origin->data & 0xff; |
3199 | uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; |
3200 | |
3201 | trace_vtd_ir_remap_type("IOAPIC" ); |
3202 | /* IOAPIC entry vector should be aligned with IRTE vector |
3203 | * (see vt-d spec 5.1.5.1). */ |
3204 | if (vector != irq.vector) { |
3205 | trace_vtd_warn_ir_vector(sid, index, vector, irq.vector); |
3206 | } |
3207 | |
3208 | /* The Trigger Mode field must match the Trigger Mode in the IRTE. |
3209 | * (see vt-d spec 5.1.5.1). */ |
3210 | if (trigger_mode != irq.trigger_mode) { |
3211 | trace_vtd_warn_ir_trigger(sid, index, trigger_mode, |
3212 | irq.trigger_mode); |
3213 | } |
3214 | } |
3215 | |
3216 | /* |
3217 | * We'd better keep the last two bits, assuming that guest OS |
3218 | * might modify it. Keep it does not hurt after all. |
3219 | */ |
3220 | irq.msi_addr_last_bits = addr.addr.__not_care; |
3221 | |
3222 | /* Translate X86IOMMUIrq to MSI message */ |
3223 | x86_iommu_irq_to_msi_message(&irq, translated); |
3224 | |
3225 | out: |
3226 | trace_vtd_ir_remap_msi(origin->address, origin->data, |
3227 | translated->address, translated->data); |
3228 | return 0; |
3229 | } |
3230 | |
3231 | static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src, |
3232 | MSIMessage *dst, uint16_t sid) |
3233 | { |
3234 | return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu), |
3235 | src, dst, sid); |
3236 | } |
3237 | |
3238 | static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr, |
3239 | uint64_t *data, unsigned size, |
3240 | MemTxAttrs attrs) |
3241 | { |
3242 | return MEMTX_OK; |
3243 | } |
3244 | |
3245 | static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr, |
3246 | uint64_t value, unsigned size, |
3247 | MemTxAttrs attrs) |
3248 | { |
3249 | int ret = 0; |
3250 | MSIMessage from = {}, to = {}; |
3251 | uint16_t sid = X86_IOMMU_SID_INVALID; |
3252 | |
3253 | from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST; |
3254 | from.data = (uint32_t) value; |
3255 | |
3256 | if (!attrs.unspecified) { |
3257 | /* We have explicit Source ID */ |
3258 | sid = attrs.requester_id; |
3259 | } |
3260 | |
3261 | ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid); |
3262 | if (ret) { |
3263 | /* TODO: report error */ |
3264 | /* Drop this interrupt */ |
3265 | return MEMTX_ERROR; |
3266 | } |
3267 | |
3268 | apic_get_class()->send_msi(&to); |
3269 | |
3270 | return MEMTX_OK; |
3271 | } |
3272 | |
3273 | static const MemoryRegionOps vtd_mem_ir_ops = { |
3274 | .read_with_attrs = vtd_mem_ir_read, |
3275 | .write_with_attrs = vtd_mem_ir_write, |
3276 | .endianness = DEVICE_LITTLE_ENDIAN, |
3277 | .impl = { |
3278 | .min_access_size = 4, |
3279 | .max_access_size = 4, |
3280 | }, |
3281 | .valid = { |
3282 | .min_access_size = 4, |
3283 | .max_access_size = 4, |
3284 | }, |
3285 | }; |
3286 | |
3287 | VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) |
3288 | { |
3289 | uintptr_t key = (uintptr_t)bus; |
3290 | VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key); |
3291 | VTDAddressSpace *vtd_dev_as; |
3292 | char name[128]; |
3293 | |
3294 | if (!vtd_bus) { |
3295 | uintptr_t *new_key = g_malloc(sizeof(*new_key)); |
3296 | *new_key = (uintptr_t)bus; |
3297 | /* No corresponding free() */ |
3298 | vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \ |
3299 | PCI_DEVFN_MAX); |
3300 | vtd_bus->bus = bus; |
3301 | g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus); |
3302 | } |
3303 | |
3304 | vtd_dev_as = vtd_bus->dev_as[devfn]; |
3305 | |
3306 | if (!vtd_dev_as) { |
3307 | snprintf(name, sizeof(name), "vtd-%02x.%x" , PCI_SLOT(devfn), |
3308 | PCI_FUNC(devfn)); |
3309 | vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace)); |
3310 | |
3311 | vtd_dev_as->bus = bus; |
3312 | vtd_dev_as->devfn = (uint8_t)devfn; |
3313 | vtd_dev_as->iommu_state = s; |
3314 | vtd_dev_as->context_cache_entry.context_cache_gen = 0; |
3315 | vtd_dev_as->iova_tree = iova_tree_new(); |
3316 | |
3317 | memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX); |
3318 | address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root" ); |
3319 | |
3320 | /* |
3321 | * Build the DMAR-disabled container with aliases to the |
3322 | * shared MRs. Note that aliasing to a shared memory region |
3323 | * could help the memory API to detect same FlatViews so we |
3324 | * can have devices to share the same FlatView when DMAR is |
3325 | * disabled (either by not providing "intel_iommu=on" or with |
3326 | * "iommu=pt"). It will greatly reduce the total number of |
3327 | * FlatViews of the system hence VM runs faster. |
3328 | */ |
3329 | memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s), |
3330 | "vtd-nodmar" , &s->mr_nodmar, 0, |
3331 | memory_region_size(&s->mr_nodmar)); |
3332 | |
3333 | /* |
3334 | * Build the per-device DMAR-enabled container. |
3335 | * |
3336 | * TODO: currently we have per-device IOMMU memory region only |
3337 | * because we have per-device IOMMU notifiers for devices. If |
3338 | * one day we can abstract the IOMMU notifiers out of the |
3339 | * memory regions then we can also share the same memory |
3340 | * region here just like what we've done above with the nodmar |
3341 | * region. |
3342 | */ |
3343 | strcat(name, "-dmar" ); |
3344 | memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu), |
3345 | TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s), |
3346 | name, UINT64_MAX); |
3347 | memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir" , |
3348 | &s->mr_ir, 0, memory_region_size(&s->mr_ir)); |
3349 | memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu), |
3350 | VTD_INTERRUPT_ADDR_FIRST, |
3351 | &vtd_dev_as->iommu_ir, 1); |
3352 | |
3353 | /* |
3354 | * Hook both the containers under the root container, we |
3355 | * switch between DMAR & noDMAR by enable/disable |
3356 | * corresponding sub-containers |
3357 | */ |
3358 | memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, |
3359 | MEMORY_REGION(&vtd_dev_as->iommu), |
3360 | 0); |
3361 | memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, |
3362 | &vtd_dev_as->nodmar, 0); |
3363 | |
3364 | vtd_switch_address_space(vtd_dev_as); |
3365 | } |
3366 | return vtd_dev_as; |
3367 | } |
3368 | |
3369 | static uint64_t get_naturally_aligned_size(uint64_t start, |
3370 | uint64_t size, int gaw) |
3371 | { |
3372 | uint64_t max_mask = 1ULL << gaw; |
3373 | uint64_t alignment = start ? start & -start : max_mask; |
3374 | |
3375 | alignment = MIN(alignment, max_mask); |
3376 | size = MIN(size, max_mask); |
3377 | |
3378 | if (alignment <= size) { |
3379 | /* Increase the alignment of start */ |
3380 | return alignment; |
3381 | } else { |
3382 | /* Find the largest page mask from size */ |
3383 | return 1ULL << (63 - clz64(size)); |
3384 | } |
3385 | } |
3386 | |
3387 | /* Unmap the whole range in the notifier's scope. */ |
3388 | static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n) |
3389 | { |
3390 | hwaddr size, remain; |
3391 | hwaddr start = n->start; |
3392 | hwaddr end = n->end; |
3393 | IntelIOMMUState *s = as->iommu_state; |
3394 | DMAMap map; |
3395 | |
3396 | /* |
3397 | * Note: all the codes in this function has a assumption that IOVA |
3398 | * bits are no more than VTD_MGAW bits (which is restricted by |
3399 | * VT-d spec), otherwise we need to consider overflow of 64 bits. |
3400 | */ |
3401 | |
3402 | if (end > VTD_ADDRESS_SIZE(s->aw_bits) - 1) { |
3403 | /* |
3404 | * Don't need to unmap regions that is bigger than the whole |
3405 | * VT-d supported address space size |
3406 | */ |
3407 | end = VTD_ADDRESS_SIZE(s->aw_bits) - 1; |
3408 | } |
3409 | |
3410 | assert(start <= end); |
3411 | size = remain = end - start + 1; |
3412 | |
3413 | while (remain >= VTD_PAGE_SIZE) { |
3414 | IOMMUTLBEntry entry; |
3415 | uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits); |
3416 | |
3417 | assert(mask); |
3418 | |
3419 | entry.iova = start; |
3420 | entry.addr_mask = mask - 1; |
3421 | entry.target_as = &address_space_memory; |
3422 | entry.perm = IOMMU_NONE; |
3423 | /* This field is meaningless for unmap */ |
3424 | entry.translated_addr = 0; |
3425 | |
3426 | memory_region_notify_one(n, &entry); |
3427 | |
3428 | start += mask; |
3429 | remain -= mask; |
3430 | } |
3431 | |
3432 | assert(!remain); |
3433 | |
3434 | trace_vtd_as_unmap_whole(pci_bus_num(as->bus), |
3435 | VTD_PCI_SLOT(as->devfn), |
3436 | VTD_PCI_FUNC(as->devfn), |
3437 | n->start, size); |
3438 | |
3439 | map.iova = n->start; |
3440 | map.size = size; |
3441 | iova_tree_remove(as->iova_tree, &map); |
3442 | } |
3443 | |
3444 | static void vtd_address_space_unmap_all(IntelIOMMUState *s) |
3445 | { |
3446 | VTDAddressSpace *vtd_as; |
3447 | IOMMUNotifier *n; |
3448 | |
3449 | QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { |
3450 | IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { |
3451 | vtd_address_space_unmap(vtd_as, n); |
3452 | } |
3453 | } |
3454 | } |
3455 | |
3456 | static void vtd_address_space_refresh_all(IntelIOMMUState *s) |
3457 | { |
3458 | vtd_address_space_unmap_all(s); |
3459 | vtd_switch_address_space_all(s); |
3460 | } |
3461 | |
3462 | static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private) |
3463 | { |
3464 | memory_region_notify_one((IOMMUNotifier *)private, entry); |
3465 | return 0; |
3466 | } |
3467 | |
3468 | static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) |
3469 | { |
3470 | VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu); |
3471 | IntelIOMMUState *s = vtd_as->iommu_state; |
3472 | uint8_t bus_n = pci_bus_num(vtd_as->bus); |
3473 | VTDContextEntry ce; |
3474 | |
3475 | /* |
3476 | * The replay can be triggered by either a invalidation or a newly |
3477 | * created entry. No matter what, we release existing mappings |
3478 | * (it means flushing caches for UNMAP-only registers). |
3479 | */ |
3480 | vtd_address_space_unmap(vtd_as, n); |
3481 | |
3482 | if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { |
3483 | trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" : |
3484 | "legacy mode" , |
3485 | bus_n, PCI_SLOT(vtd_as->devfn), |
3486 | PCI_FUNC(vtd_as->devfn), |
3487 | vtd_get_domain_id(s, &ce), |
3488 | ce.hi, ce.lo); |
3489 | if (vtd_as_has_map_notifier(vtd_as)) { |
3490 | /* This is required only for MAP typed notifiers */ |
3491 | vtd_page_walk_info info = { |
3492 | .hook_fn = vtd_replay_hook, |
3493 | .private = (void *)n, |
3494 | .notify_unmap = false, |
3495 | .aw = s->aw_bits, |
3496 | .as = vtd_as, |
3497 | .domain_id = vtd_get_domain_id(s, &ce), |
3498 | }; |
3499 | |
3500 | vtd_page_walk(s, &ce, 0, ~0ULL, &info); |
3501 | } |
3502 | } else { |
3503 | trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), |
3504 | PCI_FUNC(vtd_as->devfn)); |
3505 | } |
3506 | |
3507 | return; |
3508 | } |
3509 | |
3510 | /* Do the initialization. It will also be called when reset, so pay |
3511 | * attention when adding new initialization stuff. |
3512 | */ |
3513 | static void vtd_init(IntelIOMMUState *s) |
3514 | { |
3515 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
3516 | |
3517 | memset(s->csr, 0, DMAR_REG_SIZE); |
3518 | memset(s->wmask, 0, DMAR_REG_SIZE); |
3519 | memset(s->w1cmask, 0, DMAR_REG_SIZE); |
3520 | memset(s->womask, 0, DMAR_REG_SIZE); |
3521 | |
3522 | s->root = 0; |
3523 | s->root_scalable = false; |
3524 | s->dmar_enabled = false; |
3525 | s->intr_enabled = false; |
3526 | s->iq_head = 0; |
3527 | s->iq_tail = 0; |
3528 | s->iq = 0; |
3529 | s->iq_size = 0; |
3530 | s->qi_enabled = false; |
3531 | s->iq_last_desc_type = VTD_INV_DESC_NONE; |
3532 | s->iq_dw = false; |
3533 | s->next_frcd_reg = 0; |
3534 | s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | |
3535 | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS | |
3536 | VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(s->aw_bits); |
3537 | if (s->dma_drain) { |
3538 | s->cap |= VTD_CAP_DRAIN; |
3539 | } |
3540 | if (s->aw_bits == VTD_HOST_AW_48BIT) { |
3541 | s->cap |= VTD_CAP_SAGAW_48bit; |
3542 | } |
3543 | s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; |
3544 | |
3545 | /* |
3546 | * Rsvd field masks for spte |
3547 | */ |
3548 | vtd_paging_entry_rsvd_field[0] = ~0ULL; |
3549 | vtd_paging_entry_rsvd_field[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits); |
3550 | vtd_paging_entry_rsvd_field[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits); |
3551 | vtd_paging_entry_rsvd_field[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits); |
3552 | vtd_paging_entry_rsvd_field[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits); |
3553 | vtd_paging_entry_rsvd_field[5] = VTD_SPTE_LPAGE_L1_RSVD_MASK(s->aw_bits); |
3554 | vtd_paging_entry_rsvd_field[6] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits); |
3555 | vtd_paging_entry_rsvd_field[7] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits); |
3556 | vtd_paging_entry_rsvd_field[8] = VTD_SPTE_LPAGE_L4_RSVD_MASK(s->aw_bits); |
3557 | |
3558 | if (x86_iommu_ir_supported(x86_iommu)) { |
3559 | s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV; |
3560 | if (s->intr_eim == ON_OFF_AUTO_ON) { |
3561 | s->ecap |= VTD_ECAP_EIM; |
3562 | } |
3563 | assert(s->intr_eim != ON_OFF_AUTO_AUTO); |
3564 | } |
3565 | |
3566 | if (x86_iommu->dt_supported) { |
3567 | s->ecap |= VTD_ECAP_DT; |
3568 | } |
3569 | |
3570 | if (x86_iommu->pt_supported) { |
3571 | s->ecap |= VTD_ECAP_PT; |
3572 | } |
3573 | |
3574 | if (s->caching_mode) { |
3575 | s->cap |= VTD_CAP_CM; |
3576 | } |
3577 | |
3578 | /* TODO: read cap/ecap from host to decide which cap to be exposed. */ |
3579 | if (s->scalable_mode) { |
3580 | s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; |
3581 | } |
3582 | |
3583 | vtd_reset_caches(s); |
3584 | |
3585 | /* Define registers with default values and bit semantics */ |
3586 | vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0); |
3587 | vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0); |
3588 | vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0); |
3589 | vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0); |
3590 | vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL); |
3591 | vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0); |
3592 | vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffffc00ULL, 0); |
3593 | vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0); |
3594 | vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL); |
3595 | |
3596 | /* Advanced Fault Logging not supported */ |
3597 | vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL); |
3598 | vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0); |
3599 | vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0); |
3600 | vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0); |
3601 | |
3602 | /* Treated as RsvdZ when EIM in ECAP_REG is not supported |
3603 | * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0); |
3604 | */ |
3605 | vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0); |
3606 | |
3607 | /* Treated as RO for implementations that PLMR and PHMR fields reported |
3608 | * as Clear in the CAP_REG. |
3609 | * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0); |
3610 | */ |
3611 | vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0); |
3612 | |
3613 | vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0); |
3614 | vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0); |
3615 | vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff807ULL, 0); |
3616 | vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL); |
3617 | vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0); |
3618 | vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0); |
3619 | vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0); |
3620 | /* Treadted as RsvdZ when EIM in ECAP_REG is not supported */ |
3621 | vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0); |
3622 | |
3623 | /* IOTLB registers */ |
3624 | vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0Xb003ffff00000000ULL, 0); |
3625 | vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0); |
3626 | vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL); |
3627 | |
3628 | /* Fault Recording Registers, 128-bit */ |
3629 | vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0); |
3630 | vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL); |
3631 | |
3632 | /* |
3633 | * Interrupt remapping registers. |
3634 | */ |
3635 | vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0); |
3636 | } |
3637 | |
3638 | /* Should not reset address_spaces when reset because devices will still use |
3639 | * the address space they got at first (won't ask the bus again). |
3640 | */ |
3641 | static void vtd_reset(DeviceState *dev) |
3642 | { |
3643 | IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); |
3644 | |
3645 | vtd_init(s); |
3646 | vtd_address_space_refresh_all(s); |
3647 | } |
3648 | |
3649 | static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) |
3650 | { |
3651 | IntelIOMMUState *s = opaque; |
3652 | VTDAddressSpace *vtd_as; |
3653 | |
3654 | assert(0 <= devfn && devfn < PCI_DEVFN_MAX); |
3655 | |
3656 | vtd_as = vtd_find_add_as(s, bus, devfn); |
3657 | return &vtd_as->as; |
3658 | } |
3659 | |
3660 | static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) |
3661 | { |
3662 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
3663 | |
3664 | if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) { |
3665 | error_setg(errp, "eim=on cannot be selected without intremap=on" ); |
3666 | return false; |
3667 | } |
3668 | |
3669 | if (s->intr_eim == ON_OFF_AUTO_AUTO) { |
3670 | s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim) |
3671 | && x86_iommu_ir_supported(x86_iommu) ? |
3672 | ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; |
3673 | } |
3674 | if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) { |
3675 | if (!kvm_irqchip_in_kernel()) { |
3676 | error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split" ); |
3677 | return false; |
3678 | } |
3679 | if (!kvm_enable_x2apic()) { |
3680 | error_setg(errp, "eim=on requires support on the KVM side" |
3681 | "(X2APIC_API, first shipped in v4.7)" ); |
3682 | return false; |
3683 | } |
3684 | } |
3685 | |
3686 | /* Currently only address widths supported are 39 and 48 bits */ |
3687 | if ((s->aw_bits != VTD_HOST_AW_39BIT) && |
3688 | (s->aw_bits != VTD_HOST_AW_48BIT)) { |
3689 | error_setg(errp, "Supported values for x-aw-bits are: %d, %d" , |
3690 | VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT); |
3691 | return false; |
3692 | } |
3693 | |
3694 | if (s->scalable_mode && !s->dma_drain) { |
3695 | error_setg(errp, "Need to set dma_drain for scalable mode" ); |
3696 | return false; |
3697 | } |
3698 | |
3699 | return true; |
3700 | } |
3701 | |
3702 | static void vtd_realize(DeviceState *dev, Error **errp) |
3703 | { |
3704 | MachineState *ms = MACHINE(qdev_get_machine()); |
3705 | PCMachineState *pcms = PC_MACHINE(ms); |
3706 | PCIBus *bus = pcms->bus; |
3707 | IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); |
3708 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); |
3709 | |
3710 | x86_iommu->type = TYPE_INTEL; |
3711 | |
3712 | if (!vtd_decide_config(s, errp)) { |
3713 | return; |
3714 | } |
3715 | |
3716 | QLIST_INIT(&s->vtd_as_with_notifiers); |
3717 | qemu_mutex_init(&s->iommu_lock); |
3718 | memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num)); |
3719 | memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s, |
3720 | "intel_iommu" , DMAR_REG_SIZE); |
3721 | |
3722 | /* Create the shared memory regions by all devices */ |
3723 | memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar" , |
3724 | UINT64_MAX); |
3725 | memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops, |
3726 | s, "vtd-ir" , VTD_INTERRUPT_ADDR_SIZE); |
3727 | memory_region_init_alias(&s->mr_sys_alias, OBJECT(s), |
3728 | "vtd-sys-alias" , get_system_memory(), 0, |
3729 | memory_region_size(get_system_memory())); |
3730 | memory_region_add_subregion_overlap(&s->mr_nodmar, 0, |
3731 | &s->mr_sys_alias, 0); |
3732 | memory_region_add_subregion_overlap(&s->mr_nodmar, |
3733 | VTD_INTERRUPT_ADDR_FIRST, |
3734 | &s->mr_ir, 1); |
3735 | |
3736 | sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem); |
3737 | /* No corresponding destroy */ |
3738 | s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, |
3739 | g_free, g_free); |
3740 | s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, |
3741 | g_free, g_free); |
3742 | vtd_init(s); |
3743 | sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR); |
3744 | pci_setup_iommu(bus, vtd_host_dma_iommu, dev); |
3745 | /* Pseudo address space under root PCI bus. */ |
3746 | pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); |
3747 | } |
3748 | |
3749 | static void vtd_class_init(ObjectClass *klass, void *data) |
3750 | { |
3751 | DeviceClass *dc = DEVICE_CLASS(klass); |
3752 | X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass); |
3753 | |
3754 | dc->reset = vtd_reset; |
3755 | dc->vmsd = &vtd_vmstate; |
3756 | dc->props = vtd_properties; |
3757 | dc->hotpluggable = false; |
3758 | x86_class->realize = vtd_realize; |
3759 | x86_class->int_remap = vtd_int_remap; |
3760 | /* Supported by the pc-q35-* machine types */ |
3761 | dc->user_creatable = true; |
3762 | set_bit(DEVICE_CATEGORY_MISC, dc->categories); |
3763 | dc->desc = "Intel IOMMU (VT-d) DMA Remapping device" ; |
3764 | } |
3765 | |
3766 | static const TypeInfo vtd_info = { |
3767 | .name = TYPE_INTEL_IOMMU_DEVICE, |
3768 | .parent = TYPE_X86_IOMMU_DEVICE, |
3769 | .instance_size = sizeof(IntelIOMMUState), |
3770 | .class_init = vtd_class_init, |
3771 | }; |
3772 | |
3773 | static void vtd_iommu_memory_region_class_init(ObjectClass *klass, |
3774 | void *data) |
3775 | { |
3776 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); |
3777 | |
3778 | imrc->translate = vtd_iommu_translate; |
3779 | imrc->notify_flag_changed = vtd_iommu_notify_flag_changed; |
3780 | imrc->replay = vtd_iommu_replay; |
3781 | } |
3782 | |
3783 | static const TypeInfo vtd_iommu_memory_region_info = { |
3784 | .parent = TYPE_IOMMU_MEMORY_REGION, |
3785 | .name = TYPE_INTEL_IOMMU_MEMORY_REGION, |
3786 | .class_init = vtd_iommu_memory_region_class_init, |
3787 | }; |
3788 | |
3789 | static void vtd_register_types(void) |
3790 | { |
3791 | type_register_static(&vtd_info); |
3792 | type_register_static(&vtd_iommu_memory_region_info); |
3793 | } |
3794 | |
3795 | type_init(vtd_register_types) |
3796 | |