1 | /* |
2 | * QEMU PowerPC XIVE interrupt controller model |
3 | * |
4 | * Copyright (c) 2017-2018, IBM Corporation. |
5 | * |
6 | * This code is licensed under the GPL version 2 or later. See the |
7 | * COPYING file in the top-level directory. |
8 | */ |
9 | |
10 | #include "qemu/osdep.h" |
11 | #include "qemu/log.h" |
12 | #include "qemu/module.h" |
13 | #include "qapi/error.h" |
14 | #include "target/ppc/cpu.h" |
15 | #include "sysemu/cpus.h" |
16 | #include "sysemu/dma.h" |
17 | #include "sysemu/reset.h" |
18 | #include "hw/qdev-properties.h" |
19 | #include "migration/vmstate.h" |
20 | #include "monitor/monitor.h" |
21 | #include "hw/irq.h" |
22 | #include "hw/ppc/xive.h" |
23 | #include "hw/ppc/xive_regs.h" |
24 | |
25 | /* |
26 | * XIVE Thread Interrupt Management context |
27 | */ |
28 | |
29 | /* |
30 | * Convert a priority number to an Interrupt Pending Buffer (IPB) |
31 | * register, which indicates a pending interrupt at the priority |
32 | * corresponding to the bit number |
33 | */ |
34 | static uint8_t priority_to_ipb(uint8_t priority) |
35 | { |
36 | return priority > XIVE_PRIORITY_MAX ? |
37 | 0 : 1 << (XIVE_PRIORITY_MAX - priority); |
38 | } |
39 | |
40 | /* |
41 | * Convert an Interrupt Pending Buffer (IPB) register to a Pending |
42 | * Interrupt Priority Register (PIPR), which contains the priority of |
43 | * the most favored pending notification. |
44 | */ |
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}
49 | |
50 | static void ipb_update(uint8_t *regs, uint8_t priority) |
51 | { |
52 | regs[TM_IPB] |= priority_to_ipb(priority); |
53 | regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); |
54 | } |
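
/*
 * A worked example of the helpers above (a sketch, assuming
 * XIVE_PRIORITY_MAX is 7 as defined in xive_regs.h): an event at
 * priority 5 sets IPB bit 1 << (7 - 5) = 0x04, and ipb_to_pipr(0x04)
 * = clz32(0x04000000) = 5, so PIPR reports 5 as the most favored
 * pending priority. A second event at the more favored priority 2
 * then yields:
 *
 *     ipb_update(regs, 5);    // IPB = 0x04, PIPR = 5
 *     ipb_update(regs, 2);    // IPB = 0x24, PIPR = 2
 */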
55 | |
56 | static uint8_t exception_mask(uint8_t ring) |
57 | { |
58 | switch (ring) { |
59 | case TM_QW1_OS: |
60 | return TM_QW1_NSR_EO; |
61 | case TM_QW3_HV_PHYS: |
62 | return TM_QW3_NSR_HE; |
63 | default: |
64 | g_assert_not_reached(); |
65 | } |
66 | } |
67 | |
68 | static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) |
69 | { |
70 | switch (ring) { |
71 | case TM_QW0_USER: |
72 | return 0; /* Not supported */ |
73 | case TM_QW1_OS: |
74 | return tctx->os_output; |
75 | case TM_QW2_HV_POOL: |
76 | case TM_QW3_HV_PHYS: |
77 | return tctx->hv_output; |
78 | default: |
79 | return 0; |
80 | } |
81 | } |
82 | |
83 | static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) |
84 | { |
85 | uint8_t *regs = &tctx->regs[ring]; |
86 | uint8_t nsr = regs[TM_NSR]; |
87 | uint8_t mask = exception_mask(ring); |
88 | |
89 | qemu_irq_lower(xive_tctx_output(tctx, ring)); |
90 | |
91 | if (regs[TM_NSR] & mask) { |
92 | uint8_t cppr = regs[TM_PIPR]; |
93 | |
94 | regs[TM_CPPR] = cppr; |
95 | |
96 | /* Reset the pending buffer bit */ |
97 | regs[TM_IPB] &= ~priority_to_ipb(cppr); |
98 | regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); |
99 | |
100 | /* Drop Exception bit */ |
101 | regs[TM_NSR] &= ~mask; |
102 | } |
103 | |
104 | return (nsr << 8) | regs[TM_CPPR]; |
105 | } |
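
/*
 * Note on the return value above, as seen by a guest doing an "ack"
 * load: the NSR byte sampled before the exception bit was cleared
 * lands in bits 15:8 and the updated CPPR in bits 7:0. As a sketch,
 * assuming TM_QW1_NSR_EO is the top bit of the NSR, accepting a
 * pending OS interrupt whose PIPR was 3 would return 0x8003.
 */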
106 | |
107 | static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) |
108 | { |
109 | uint8_t *regs = &tctx->regs[ring]; |
110 | |
111 | if (regs[TM_PIPR] < regs[TM_CPPR]) { |
112 | switch (ring) { |
113 | case TM_QW1_OS: |
114 | regs[TM_NSR] |= TM_QW1_NSR_EO; |
115 | break; |
116 | case TM_QW3_HV_PHYS: |
117 | regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6); |
118 | break; |
119 | default: |
120 | g_assert_not_reached(); |
121 | } |
122 | qemu_irq_raise(xive_tctx_output(tctx, ring)); |
123 | } |
124 | } |
125 | |
126 | static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) |
127 | { |
128 | if (cppr > XIVE_PRIORITY_MAX) { |
129 | cppr = 0xff; |
130 | } |
131 | |
132 | tctx->regs[ring + TM_CPPR] = cppr; |
133 | |
134 | /* CPPR has changed, check if we need to raise a pending exception */ |
135 | xive_tctx_notify(tctx, ring); |
136 | } |
137 | |
138 | static inline uint32_t xive_tctx_word2(uint8_t *ring) |
139 | { |
140 | return *((uint32_t *) &ring[TM_WORD2]); |
141 | } |
142 | |
143 | /* |
144 | * XIVE Thread Interrupt Management Area (TIMA) |
145 | */ |
146 | |
147 | static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset, |
148 | uint64_t value, unsigned size) |
149 | { |
150 | xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); |
151 | } |
152 | |
153 | static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size) |
154 | { |
155 | return xive_tctx_accept(tctx, TM_QW3_HV_PHYS); |
156 | } |
157 | |
158 | static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset, |
159 | unsigned size) |
160 | { |
161 | uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); |
162 | uint32_t qw2w2; |
163 | |
164 | qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0); |
165 | memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); |
166 | return qw2w2; |
167 | } |
168 | |
169 | static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset, |
170 | uint64_t value, unsigned size) |
171 | { |
172 | tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff; |
173 | } |
174 | |
175 | static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size) |
176 | { |
177 | return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff; |
178 | } |
179 | |
/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw register values
 *
 * Register accessibility bits:
 *
 *    0x0 - no access
 *    0x1 - write only
 *    0x2 - read only
 *    0x3 - read/write
 */
192 | |
193 | static const uint8_t xive_tm_hw_view[] = { |
194 | 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ |
195 | 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ |
196 | 0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ |
197 | 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ |
198 | }; |
199 | |
200 | static const uint8_t xive_tm_hv_view[] = { |
201 | 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ |
202 | 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ |
203 | 0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ |
204 | 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ |
205 | }; |
206 | |
207 | static const uint8_t xive_tm_os_view[] = { |
208 | 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ |
209 | 2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ |
210 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ |
211 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ |
212 | }; |
213 | |
214 | static const uint8_t xive_tm_user_view[] = { |
215 | 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */ |
216 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS */ |
217 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */ |
218 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */ |
219 | }; |
220 | |
221 | /* |
222 | * Overall TIMA access map for the thread interrupt management context |
223 | * registers |
224 | */ |
225 | static const uint8_t *xive_tm_views[] = { |
226 | [XIVE_TM_HW_PAGE] = xive_tm_hw_view, |
227 | [XIVE_TM_HV_PAGE] = xive_tm_hv_view, |
228 | [XIVE_TM_OS_PAGE] = xive_tm_os_view, |
229 | [XIVE_TM_USER_PAGE] = xive_tm_user_view, |
230 | }; |
231 | |
232 | /* |
233 | * Computes a register access mask for a given offset in the TIMA |
234 | */ |
235 | static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write) |
236 | { |
237 | uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; |
238 | uint8_t reg_offset = offset & 0x3F; |
239 | uint8_t reg_mask = write ? 0x1 : 0x2; |
240 | uint64_t mask = 0x0; |
241 | int i; |
242 | |
243 | for (i = 0; i < size; i++) { |
244 | if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) { |
245 | mask |= (uint64_t) 0xff << (8 * (size - i - 1)); |
246 | } |
247 | } |
248 | |
249 | return mask; |
250 | } |
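
/*
 * A worked example of the mask computation (a sketch based on the
 * view tables above): a 4-byte load at offset 0x10 of the OS page
 * covers the OS ring bytes { NSR, CPPR, IPB, LSMFB }, whose view
 * values are { 2, 3, 2, 2 }. All four have the read bit set, so the
 * load mask is 0xffffffff. For a 4-byte store at the same offset,
 * only CPPR has the write bit set, giving a mask of 0x00ff0000.
 */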
251 | |
252 | static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, |
253 | unsigned size) |
254 | { |
255 | uint8_t ring_offset = offset & 0x30; |
256 | uint8_t reg_offset = offset & 0x3F; |
257 | uint64_t mask = xive_tm_mask(offset, size, true); |
258 | int i; |
259 | |
    /*
     * Only 4 or 8 byte stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }
269 | |
270 | /* |
271 | * Use the register offset for the raw values and filter out |
272 | * reserved values |
273 | */ |
274 | for (i = 0; i < size; i++) { |
275 | uint8_t byte_mask = (mask >> (8 * (size - i - 1))); |
276 | if (byte_mask) { |
277 | tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) & |
278 | byte_mask; |
279 | } |
280 | } |
281 | } |
282 | |
283 | static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) |
284 | { |
285 | uint8_t ring_offset = offset & 0x30; |
286 | uint8_t reg_offset = offset & 0x3F; |
287 | uint64_t mask = xive_tm_mask(offset, size, false); |
288 | uint64_t ret; |
289 | int i; |
290 | |
    /*
     * Only 4 or 8 byte loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }
300 | |
301 | /* Use the register offset for the raw values */ |
302 | ret = 0; |
303 | for (i = 0; i < size; i++) { |
304 | ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1)); |
305 | } |
306 | |
307 | /* filter out reserved values */ |
308 | return ret & mask; |
309 | } |
310 | |
311 | /* |
312 | * The TM context is mapped twice within each page. Stores and loads |
313 | * to the first mapping below 2K write and read the specified values |
314 | * without modification. The second mapping above 2K performs specific |
315 | * state changes (side effects) in addition to setting/returning the |
316 | * interrupt management area context of the processor thread. |
317 | */ |
318 | static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size) |
319 | { |
320 | return xive_tctx_accept(tctx, TM_QW1_OS); |
321 | } |
322 | |
323 | static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset, |
324 | uint64_t value, unsigned size) |
325 | { |
326 | xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); |
327 | } |
328 | |
329 | /* |
330 | * Adjust the IPB to allow a CPU to process event queues of other |
331 | * priorities during one physical interrupt cycle. |
332 | */ |
333 | static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset, |
334 | uint64_t value, unsigned size) |
335 | { |
336 | ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff); |
337 | xive_tctx_notify(tctx, TM_QW1_OS); |
338 | } |
339 | |
340 | static uint64_t xive_tm_pull_os_ctx(XiveTCTX *tctx, hwaddr offset, |
341 | unsigned size) |
342 | { |
343 | uint32_t qw1w2_prev = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); |
344 | uint32_t qw1w2; |
345 | |
346 | qw1w2 = xive_set_field32(TM_QW1W2_VO, qw1w2_prev, 0); |
347 | memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); |
348 | return qw1w2; |
349 | } |
350 | |
351 | /* |
352 | * Define a mapping of "special" operations depending on the TIMA page |
353 | * offset and the size of the operation. |
354 | */ |
355 | typedef struct XiveTmOp { |
356 | uint8_t page_offset; |
357 | uint32_t op_offset; |
358 | unsigned size; |
359 | void (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value, |
360 | unsigned size); |
361 | uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size); |
362 | } XiveTmOp; |
363 | |
364 | static const XiveTmOp xive_tm_operations[] = { |
365 | /* |
366 | * MMIOs below 2K : raw values and special operations without side |
367 | * effects |
368 | */ |
369 | { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL }, |
370 | { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL }, |
371 | { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL }, |
372 | { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll }, |
373 | |
374 | /* MMIOs above 2K : special operations with side effects */ |
375 | { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg }, |
376 | { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL }, |
377 | { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx }, |
378 | { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx }, |
379 | { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg }, |
380 | { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx }, |
381 | { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx }, |
382 | }; |
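
/*
 * An example of how the table above resolves (a sketch): a 2-byte
 * load at TM_SPC_ACK_OS_REG from the OS page matches the
 * xive_tm_ack_os_reg entry, and the same load from the more
 * privileged HV page matches it too, since xive_tm_find_op() below
 * accepts operations defined for less privileged pages. A 1-byte
 * load at that offset matches no entry and is reported as an
 * invalid access.
 */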
383 | |
384 | static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write) |
385 | { |
386 | uint8_t page_offset = (offset >> TM_SHIFT) & 0x3; |
387 | uint32_t op_offset = offset & 0xFFF; |
388 | int i; |
389 | |
390 | for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) { |
391 | const XiveTmOp *xto = &xive_tm_operations[i]; |
392 | |
        /* Accesses done from a more privileged TIMA page are allowed */
394 | if (xto->page_offset >= page_offset && |
395 | xto->op_offset == op_offset && |
396 | xto->size == size && |
397 | ((write && xto->write_handler) || (!write && xto->read_handler))) { |
398 | return xto; |
399 | } |
400 | } |
401 | return NULL; |
402 | } |
403 | |
404 | /* |
405 | * TIMA MMIO handlers |
406 | */ |
407 | void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, |
408 | unsigned size) |
409 | { |
410 | const XiveTmOp *xto; |
411 | |
412 | /* |
413 | * TODO: check V bit in Q[0-3]W2 |
414 | */ |
415 | |
416 | /* |
417 | * First, check for special operations in the 2K region |
418 | */ |
419 | if (offset & 0x800) { |
420 | xto = xive_tm_find_op(offset, size, true); |
421 | if (!xto) { |
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%" HWADDR_PRIx"\n", offset);
424 | } else { |
425 | xto->write_handler(tctx, offset, value, size); |
426 | } |
427 | return; |
428 | } |
429 | |
430 | /* |
431 | * Then, for special operations in the region below 2K. |
432 | */ |
433 | xto = xive_tm_find_op(offset, size, true); |
434 | if (xto) { |
435 | xto->write_handler(tctx, offset, value, size); |
436 | return; |
437 | } |
438 | |
439 | /* |
440 | * Finish with raw access to the register values |
441 | */ |
442 | xive_tm_raw_write(tctx, offset, value, size); |
443 | } |
444 | |
445 | uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size) |
446 | { |
447 | const XiveTmOp *xto; |
448 | |
449 | /* |
450 | * TODO: check V bit in Q[0-3]W2 |
451 | */ |
452 | |
453 | /* |
454 | * First, check for special operations in the 2K region |
455 | */ |
456 | if (offset & 0x800) { |
457 | xto = xive_tm_find_op(offset, size, false); |
458 | if (!xto) { |
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
                          " @%" HWADDR_PRIx"\n", offset);
461 | return -1; |
462 | } |
463 | return xto->read_handler(tctx, offset, size); |
464 | } |
465 | |
466 | /* |
467 | * Then, for special operations in the region below 2K. |
468 | */ |
469 | xto = xive_tm_find_op(offset, size, false); |
470 | if (xto) { |
471 | return xto->read_handler(tctx, offset, size); |
472 | } |
473 | |
474 | /* |
475 | * Finish with raw access to the register values |
476 | */ |
477 | return xive_tm_raw_read(tctx, offset, size); |
478 | } |
479 | |
480 | static void xive_tm_write(void *opaque, hwaddr offset, |
481 | uint64_t value, unsigned size) |
482 | { |
483 | XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu); |
484 | |
485 | xive_tctx_tm_write(tctx, offset, value, size); |
486 | } |
487 | |
488 | static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size) |
489 | { |
490 | XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu); |
491 | |
492 | return xive_tctx_tm_read(tctx, offset, size); |
493 | } |
494 | |
495 | const MemoryRegionOps xive_tm_ops = { |
496 | .read = xive_tm_read, |
497 | .write = xive_tm_write, |
498 | .endianness = DEVICE_BIG_ENDIAN, |
499 | .valid = { |
500 | .min_access_size = 1, |
501 | .max_access_size = 8, |
502 | }, |
503 | .impl = { |
504 | .min_access_size = 1, |
505 | .max_access_size = 8, |
506 | }, |
507 | }; |
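
/*
 * A minimal mapping sketch for the ops above. The "xive" device, its
 * "tm_mmio" field and the XIVE_TM_BASE address are placeholders for
 * whatever the machine model defines; the region spans the four TIMA
 * pages (HW, HV, OS, User):
 *
 *     memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
 *                           xive, "xive.tima", 4ull << TM_SHIFT);
 *     memory_region_add_subregion(sysmem, XIVE_TM_BASE, &xive->tm_mmio);
 */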
508 | |
509 | static char *xive_tctx_ring_print(uint8_t *ring) |
510 | { |
511 | uint32_t w2 = xive_tctx_word2(ring); |
512 | |
513 | return g_strdup_printf("%02x %02x %02x %02x %02x " |
                           "%02x %02x %02x %08x",
515 | ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB], ring[TM_LSMFB], |
516 | ring[TM_ACK_CNT], ring[TM_INC], ring[TM_AGE], ring[TM_PIPR], |
517 | be32_to_cpu(w2)); |
518 | } |
519 | |
520 | static const char * const xive_tctx_ring_names[] = { |
    "USER", "OS", "POOL", "PHYS",
522 | }; |
523 | |
524 | void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon) |
525 | { |
526 | int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1; |
527 | int i; |
528 | |
529 | if (kvm_irqchip_in_kernel()) { |
530 | Error *local_err = NULL; |
531 | |
532 | kvmppc_xive_cpu_synchronize_state(tctx, &local_err); |
533 | if (local_err) { |
534 | error_report_err(local_err); |
535 | return; |
536 | } |
537 | } |
538 | |
539 | monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR" |
                   " W2\n", cpu_index);
541 | |
542 | for (i = 0; i < XIVE_TM_RING_COUNT; i++) { |
543 | char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]); |
        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
545 | xive_tctx_ring_names[i], s); |
546 | g_free(s); |
547 | } |
548 | } |
549 | |
550 | static void xive_tctx_reset(void *dev) |
551 | { |
552 | XiveTCTX *tctx = XIVE_TCTX(dev); |
553 | |
554 | memset(tctx->regs, 0, sizeof(tctx->regs)); |
555 | |
556 | /* Set some defaults */ |
557 | tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF; |
558 | tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF; |
559 | tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF; |
560 | |
561 | /* |
562 | * Initialize PIPR to 0xFF to avoid phantom interrupts when the |
563 | * CPPR is first set. |
564 | */ |
565 | tctx->regs[TM_QW1_OS + TM_PIPR] = |
566 | ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); |
567 | tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = |
568 | ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); |
569 | } |
570 | |
571 | static void xive_tctx_realize(DeviceState *dev, Error **errp) |
572 | { |
573 | XiveTCTX *tctx = XIVE_TCTX(dev); |
574 | PowerPCCPU *cpu; |
575 | CPUPPCState *env; |
576 | Object *obj; |
577 | Error *local_err = NULL; |
578 | |
    obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'cpu' not found: ");
583 | return; |
584 | } |
585 | |
586 | cpu = POWERPC_CPU(obj); |
587 | tctx->cs = CPU(obj); |
588 | |
589 | env = &cpu->env; |
590 | switch (PPC_INPUT(env)) { |
591 | case PPC_FLAGS_INPUT_POWER9: |
592 | tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT]; |
593 | tctx->os_output = env->irq_inputs[POWER9_INPUT_INT]; |
594 | break; |
595 | |
596 | default: |
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
599 | return; |
600 | } |
601 | |
602 | /* Connect the presenter to the VCPU (required for CPU hotplug) */ |
603 | if (kvm_irqchip_in_kernel()) { |
604 | kvmppc_xive_cpu_connect(tctx, &local_err); |
605 | if (local_err) { |
606 | error_propagate(errp, local_err); |
607 | return; |
608 | } |
609 | } |
610 | |
611 | qemu_register_reset(xive_tctx_reset, dev); |
612 | } |
613 | |
614 | static void xive_tctx_unrealize(DeviceState *dev, Error **errp) |
615 | { |
616 | qemu_unregister_reset(xive_tctx_reset, dev); |
617 | } |
618 | |
619 | static int vmstate_xive_tctx_pre_save(void *opaque) |
620 | { |
621 | Error *local_err = NULL; |
622 | |
623 | if (kvm_irqchip_in_kernel()) { |
624 | kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err); |
625 | if (local_err) { |
626 | error_report_err(local_err); |
627 | return -1; |
628 | } |
629 | } |
630 | |
631 | return 0; |
632 | } |
633 | |
634 | static int vmstate_xive_tctx_post_load(void *opaque, int version_id) |
635 | { |
636 | Error *local_err = NULL; |
637 | |
638 | if (kvm_irqchip_in_kernel()) { |
639 | /* |
640 | * Required for hotplugged CPU, for which the state comes |
641 | * after all states of the machine. |
642 | */ |
643 | kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err); |
644 | if (local_err) { |
645 | error_report_err(local_err); |
646 | return -1; |
647 | } |
648 | } |
649 | |
650 | return 0; |
651 | } |
652 | |
653 | static const VMStateDescription vmstate_xive_tctx = { |
654 | .name = TYPE_XIVE_TCTX, |
655 | .version_id = 1, |
656 | .minimum_version_id = 1, |
657 | .pre_save = vmstate_xive_tctx_pre_save, |
658 | .post_load = vmstate_xive_tctx_post_load, |
659 | .fields = (VMStateField[]) { |
660 | VMSTATE_BUFFER(regs, XiveTCTX), |
661 | VMSTATE_END_OF_LIST() |
662 | }, |
663 | }; |
664 | |
665 | static void xive_tctx_class_init(ObjectClass *klass, void *data) |
666 | { |
667 | DeviceClass *dc = DEVICE_CLASS(klass); |
668 | |
    dc->desc = "XIVE Interrupt Thread Context";
670 | dc->realize = xive_tctx_realize; |
671 | dc->unrealize = xive_tctx_unrealize; |
672 | dc->vmsd = &vmstate_xive_tctx; |
673 | } |
674 | |
675 | static const TypeInfo xive_tctx_info = { |
676 | .name = TYPE_XIVE_TCTX, |
677 | .parent = TYPE_DEVICE, |
678 | .instance_size = sizeof(XiveTCTX), |
679 | .class_init = xive_tctx_class_init, |
680 | }; |
681 | |
682 | Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp) |
683 | { |
684 | Error *local_err = NULL; |
685 | Object *obj; |
686 | |
687 | obj = object_new(TYPE_XIVE_TCTX); |
688 | object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort); |
689 | object_unref(obj); |
    object_property_add_const_link(obj, "cpu", cpu, &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
692 | if (local_err) { |
693 | goto error; |
694 | } |
695 | |
696 | return obj; |
697 | |
698 | error: |
699 | object_unparent(obj); |
700 | error_propagate(errp, local_err); |
701 | return NULL; |
702 | } |
703 | |
704 | /* |
705 | * XIVE ESB helpers |
706 | */ |
707 | |
708 | static uint8_t xive_esb_set(uint8_t *pq, uint8_t value) |
709 | { |
710 | uint8_t old_pq = *pq & 0x3; |
711 | |
712 | *pq &= ~0x3; |
713 | *pq |= value & 0x3; |
714 | |
715 | return old_pq; |
716 | } |
717 | |
718 | static bool xive_esb_trigger(uint8_t *pq) |
719 | { |
720 | uint8_t old_pq = *pq & 0x3; |
721 | |
722 | switch (old_pq) { |
723 | case XIVE_ESB_RESET: |
724 | xive_esb_set(pq, XIVE_ESB_PENDING); |
725 | return true; |
726 | case XIVE_ESB_PENDING: |
727 | case XIVE_ESB_QUEUED: |
728 | xive_esb_set(pq, XIVE_ESB_QUEUED); |
729 | return false; |
730 | case XIVE_ESB_OFF: |
731 | xive_esb_set(pq, XIVE_ESB_OFF); |
732 | return false; |
733 | default: |
734 | g_assert_not_reached(); |
735 | } |
736 | } |
737 | |
738 | static bool xive_esb_eoi(uint8_t *pq) |
739 | { |
740 | uint8_t old_pq = *pq & 0x3; |
741 | |
742 | switch (old_pq) { |
743 | case XIVE_ESB_RESET: |
744 | case XIVE_ESB_PENDING: |
745 | xive_esb_set(pq, XIVE_ESB_RESET); |
746 | return false; |
747 | case XIVE_ESB_QUEUED: |
748 | xive_esb_set(pq, XIVE_ESB_PENDING); |
749 | return true; |
750 | case XIVE_ESB_OFF: |
751 | xive_esb_set(pq, XIVE_ESB_OFF); |
752 | return false; |
753 | default: |
754 | g_assert_not_reached(); |
755 | } |
756 | } |
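
/*
 * Summary of the PQ transitions implemented by the two helpers above
 * (P means an event was presented, Q that one is queued behind it):
 *
 *   PQ    trigger                EOI
 *   00    -> 10, notify          -> 00, no action
 *   10    -> 11, no action       -> 00, no action
 *   11    -> 11, no action       -> 10, notify
 *   01    -> 01, no action       -> 01, no action (OFF)
 */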
757 | |
758 | /* |
759 | * XIVE Interrupt Source (or IVSE) |
760 | */ |
761 | |
762 | uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno) |
763 | { |
764 | assert(srcno < xsrc->nr_irqs); |
765 | |
766 | return xsrc->status[srcno] & 0x3; |
767 | } |
768 | |
769 | uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq) |
770 | { |
771 | assert(srcno < xsrc->nr_irqs); |
772 | |
773 | return xive_esb_set(&xsrc->status[srcno], pq); |
774 | } |
775 | |
776 | /* |
777 | * Returns whether the event notification should be forwarded. |
778 | */ |
779 | static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) |
780 | { |
781 | uint8_t old_pq = xive_source_esb_get(xsrc, srcno); |
782 | |
783 | xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; |
784 | |
785 | switch (old_pq) { |
786 | case XIVE_ESB_RESET: |
787 | xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING); |
788 | return true; |
789 | default: |
790 | return false; |
791 | } |
792 | } |
793 | |
794 | /* |
795 | * Returns whether the event notification should be forwarded. |
796 | */ |
797 | static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno) |
798 | { |
799 | bool ret; |
800 | |
801 | assert(srcno < xsrc->nr_irqs); |
802 | |
803 | ret = xive_esb_trigger(&xsrc->status[srcno]); |
804 | |
805 | if (xive_source_irq_is_lsi(xsrc, srcno) && |
806 | xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) { |
807 | qemu_log_mask(LOG_GUEST_ERROR, |
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
809 | } |
810 | |
811 | return ret; |
812 | } |
813 | |
814 | /* |
815 | * Returns whether the event notification should be forwarded. |
816 | */ |
817 | static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) |
818 | { |
819 | bool ret; |
820 | |
821 | assert(srcno < xsrc->nr_irqs); |
822 | |
823 | ret = xive_esb_eoi(&xsrc->status[srcno]); |
824 | |
825 | /* |
826 | * LSI sources do not set the Q bit but they can still be |
827 | * asserted, in which case we should forward a new event |
828 | * notification |
829 | */ |
830 | if (xive_source_irq_is_lsi(xsrc, srcno) && |
831 | xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { |
832 | ret = xive_source_lsi_trigger(xsrc, srcno); |
833 | } |
834 | |
835 | return ret; |
836 | } |
837 | |
838 | /* |
839 | * Forward the source event notification to the Router |
840 | */ |
841 | static void xive_source_notify(XiveSource *xsrc, int srcno) |
842 | { |
843 | XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive); |
844 | |
845 | if (xnc->notify) { |
846 | xnc->notify(xsrc->xive, srcno); |
847 | } |
848 | } |
849 | |
/*
 * In a two-page ESB MMIO setting, the even page is the trigger page;
 * the odd page is for management
 */
854 | static inline bool addr_is_even(hwaddr addr, uint32_t shift) |
855 | { |
856 | return !((addr >> shift) & 1); |
857 | } |
858 | |
859 | static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr) |
860 | { |
861 | return xive_source_esb_has_2page(xsrc) && |
862 | addr_is_even(addr, xsrc->esb_shift - 1); |
863 | } |
864 | |
/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1              EOI and return 0|1
 * 0x400 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */
879 | static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) |
880 | { |
881 | XiveSource *xsrc = XIVE_SOURCE(opaque); |
882 | uint32_t offset = addr & 0xFFF; |
883 | uint32_t srcno = addr >> xsrc->esb_shift; |
884 | uint64_t ret = -1; |
885 | |
    /* In a two-page ESB MMIO setting, the trigger page should not be read */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%" HWADDR_PRIx"\n", srcno, addr);
891 | return -1; |
892 | } |
893 | |
894 | switch (offset) { |
895 | case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: |
896 | ret = xive_source_esb_eoi(xsrc, srcno); |
897 | |
898 | /* Forward the source event notification for routing */ |
899 | if (ret) { |
900 | xive_source_notify(xsrc, srcno); |
901 | } |
902 | break; |
903 | |
904 | case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: |
905 | ret = xive_source_esb_get(xsrc, srcno); |
906 | break; |
907 | |
908 | case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: |
909 | case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: |
910 | case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: |
911 | case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: |
912 | ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); |
913 | break; |
914 | default: |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
917 | } |
918 | |
919 | return ret; |
920 | } |
921 | |
/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */
936 | static void xive_source_esb_write(void *opaque, hwaddr addr, |
937 | uint64_t value, unsigned size) |
938 | { |
939 | XiveSource *xsrc = XIVE_SOURCE(opaque); |
940 | uint32_t offset = addr & 0xFFF; |
941 | uint32_t srcno = addr >> xsrc->esb_shift; |
942 | bool notify = false; |
943 | |
    /* In a two-page ESB MMIO setting, the trigger page only triggers */
945 | if (xive_source_is_trigger_page(xsrc, addr)) { |
946 | notify = xive_source_esb_trigger(xsrc, srcno); |
947 | goto out; |
948 | } |
949 | |
950 | switch (offset) { |
951 | case 0 ... 0x3FF: |
952 | notify = xive_source_esb_trigger(xsrc, srcno); |
953 | break; |
954 | |
955 | case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF: |
956 | if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) { |
957 | qemu_log_mask(LOG_GUEST_ERROR, |
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
959 | return; |
960 | } |
961 | |
962 | notify = xive_source_esb_eoi(xsrc, srcno); |
963 | break; |
964 | |
965 | case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: |
966 | case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: |
967 | case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: |
968 | case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: |
969 | xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3); |
970 | break; |
971 | |
972 | default: |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
975 | return; |
976 | } |
977 | |
978 | out: |
979 | /* Forward the source event notification for routing */ |
980 | if (notify) { |
981 | xive_source_notify(xsrc, srcno); |
982 | } |
983 | } |
984 | |
985 | static const MemoryRegionOps xive_source_esb_ops = { |
986 | .read = xive_source_esb_read, |
987 | .write = xive_source_esb_write, |
988 | .endianness = DEVICE_BIG_ENDIAN, |
989 | .valid = { |
990 | .min_access_size = 8, |
991 | .max_access_size = 8, |
992 | }, |
993 | .impl = { |
994 | .min_access_size = 8, |
995 | .max_access_size = 8, |
996 | }, |
997 | }; |
998 | |
999 | void xive_source_set_irq(void *opaque, int srcno, int val) |
1000 | { |
1001 | XiveSource *xsrc = XIVE_SOURCE(opaque); |
1002 | bool notify = false; |
1003 | |
1004 | if (xive_source_irq_is_lsi(xsrc, srcno)) { |
1005 | if (val) { |
1006 | notify = xive_source_lsi_trigger(xsrc, srcno); |
1007 | } else { |
1008 | xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; |
1009 | } |
1010 | } else { |
1011 | if (val) { |
1012 | notify = xive_source_esb_trigger(xsrc, srcno); |
1013 | } |
1014 | } |
1015 | |
1016 | /* Forward the source event notification for routing */ |
1017 | if (notify) { |
1018 | xive_source_notify(xsrc, srcno); |
1019 | } |
1020 | } |
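
/*
 * This handler is meant to be wired as a qemu_irq input handler. A
 * usage sketch ("xsrc" and "srcno" stand for a realized source and a
 * valid source number):
 *
 *     qemu_irq *irqs = qemu_allocate_irqs(xive_source_set_irq, xsrc,
 *                                         xsrc->nr_irqs);
 *     qemu_set_irq(irqs[srcno], 1);   // trigger (or assert, for LSIs)
 *     qemu_set_irq(irqs[srcno], 0);   // deassert, meaningful for LSIs only
 */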
1021 | |
1022 | void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon) |
1023 | { |
1024 | int i; |
1025 | |
1026 | for (i = 0; i < xsrc->nr_irqs; i++) { |
1027 | uint8_t pq = xive_source_esb_get(xsrc, i); |
1028 | |
1029 | if (pq == XIVE_ESB_OFF) { |
1030 | continue; |
1031 | } |
1032 | |
        monitor_printf(mon, " %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
1038 | } |
1039 | } |
1040 | |
1041 | static void xive_source_reset(void *dev) |
1042 | { |
1043 | XiveSource *xsrc = XIVE_SOURCE(dev); |
1044 | |
1045 | /* Do not clear the LSI bitmap */ |
1046 | |
1047 | /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */ |
1048 | memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs); |
1049 | } |
1050 | |
1051 | static void xive_source_realize(DeviceState *dev, Error **errp) |
1052 | { |
1053 | XiveSource *xsrc = XIVE_SOURCE(dev); |
1054 | Object *obj; |
1055 | Error *local_err = NULL; |
1056 | |
    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
1061 | return; |
1062 | } |
1063 | |
1064 | xsrc->xive = XIVE_NOTIFIER(obj); |
1065 | |
1066 | if (!xsrc->nr_irqs) { |
        error_setg(errp, "Number of interrupts needs to be greater than 0");
1068 | return; |
1069 | } |
1070 | |
1071 | if (xsrc->esb_shift != XIVE_ESB_4K && |
1072 | xsrc->esb_shift != XIVE_ESB_4K_2PAGE && |
1073 | xsrc->esb_shift != XIVE_ESB_64K && |
1074 | xsrc->esb_shift != XIVE_ESB_64K_2PAGE) { |
        error_setg(errp, "Invalid ESB shift setting");
1076 | return; |
1077 | } |
1078 | |
1079 | xsrc->status = g_malloc0(xsrc->nr_irqs); |
1080 | xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); |
1081 | |
1082 | if (!kvm_irqchip_in_kernel()) { |
1083 | memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), |
                              &xive_source_esb_ops, xsrc, "xive.esb",
1085 | (1ull << xsrc->esb_shift) * xsrc->nr_irqs); |
1086 | } |
1087 | |
1088 | qemu_register_reset(xive_source_reset, dev); |
1089 | } |
1090 | |
1091 | static const VMStateDescription vmstate_xive_source = { |
1092 | .name = TYPE_XIVE_SOURCE, |
1093 | .version_id = 1, |
1094 | .minimum_version_id = 1, |
1095 | .fields = (VMStateField[]) { |
1096 | VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), |
1097 | VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), |
1098 | VMSTATE_END_OF_LIST() |
1099 | }, |
1100 | }; |
1101 | |
1102 | /* |
1103 | * The default XIVE interrupt source setting for the ESB MMIOs is two |
1104 | * 64k pages without Store EOI, to be in sync with KVM. |
1105 | */ |
1106 | static Property xive_source_properties[] = { |
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
1110 | DEFINE_PROP_END_OF_LIST(), |
1111 | }; |
1112 | |
1113 | static void xive_source_class_init(ObjectClass *klass, void *data) |
1114 | { |
1115 | DeviceClass *dc = DEVICE_CLASS(klass); |
1116 | |
    dc->desc = "XIVE Interrupt Source";
1118 | dc->props = xive_source_properties; |
1119 | dc->realize = xive_source_realize; |
1120 | dc->vmsd = &vmstate_xive_source; |
1121 | } |
1122 | |
1123 | static const TypeInfo xive_source_info = { |
1124 | .name = TYPE_XIVE_SOURCE, |
1125 | .parent = TYPE_DEVICE, |
1126 | .instance_size = sizeof(XiveSource), |
1127 | .class_init = xive_source_class_init, |
1128 | }; |
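
/*
 * A creation sketch for this type, mirroring what a machine model
 * might do (names are illustrative, error handling elided):
 *
 *     Object *obj = object_new(TYPE_XIVE_SOURCE);
 *     object_property_set_int(obj, nr_irqs, "nr-irqs", &error_fatal);
 *     object_property_add_const_link(obj, "xive", OBJECT(xive),
 *                                    &error_fatal);
 *     object_property_set_bool(obj, true, "realized", &error_fatal);
 */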
1129 | |
1130 | /* |
1131 | * XiveEND helpers |
1132 | */ |
1133 | |
1134 | void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) |
1135 | { |
1136 | uint64_t qaddr_base = xive_end_qaddr(end); |
1137 | uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); |
1138 | uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); |
1139 | uint32_t qentries = 1 << (qsize + 10); |
1140 | int i; |
1141 | |
1142 | /* |
1143 | * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window |
1144 | */ |
    monitor_printf(mon, " [ ");
1146 | qindex = (qindex - (width - 1)) & (qentries - 1); |
1147 | for (i = 0; i < width; i++) { |
1148 | uint64_t qaddr = qaddr_base + (qindex << 2); |
1149 | uint32_t qdata = -1; |
1150 | |
1151 | if (dma_memory_read(&address_space_memory, qaddr, &qdata, |
1152 | sizeof(qdata))) { |
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
1162 | } |
1163 | |
1164 | void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon) |
1165 | { |
1166 | uint64_t qaddr_base = xive_end_qaddr(end); |
1167 | uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); |
1168 | uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); |
1169 | uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); |
1170 | uint32_t qentries = 1 << (qsize + 10); |
1171 | |
1172 | uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6); |
1173 | uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6); |
1174 | uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); |
1175 | uint8_t pq; |
1176 | |
1177 | if (!xive_end_is_valid(end)) { |
1178 | return; |
1179 | } |
1180 | |
1181 | pq = xive_get_field32(END_W1_ESn, end->w1); |
1182 | |
    monitor_printf(mon, " %08x %c%c %c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
1184 | end_idx, |
1185 | pq & XIVE_ESB_VAL_P ? 'P' : '-', |
1186 | pq & XIVE_ESB_VAL_Q ? 'Q' : '-', |
1187 | xive_end_is_valid(end) ? 'v' : '-', |
1188 | xive_end_is_enqueue(end) ? 'q' : '-', |
1189 | xive_end_is_notify(end) ? 'n' : '-', |
1190 | xive_end_is_backlog(end) ? 'b' : '-', |
1191 | xive_end_is_escalate(end) ? 'e' : '-', |
1192 | xive_end_is_uncond_escalation(end) ? 'u' : '-', |
1193 | xive_end_is_silent_escalation(end) ? 's' : '-', |
1194 | priority, nvt_blk, nvt_idx); |
1195 | |
1196 | if (qaddr_base) { |
        monitor_printf(mon, " eq:@%08" PRIx64 "% 6d/%5d ^%d",
1198 | qaddr_base, qindex, qentries, qgen); |
1199 | xive_end_queue_pic_print_info(end, 6, mon); |
1200 | } |
    monitor_printf(mon, "\n");
1202 | } |
1203 | |
1204 | static void xive_end_enqueue(XiveEND *end, uint32_t data) |
1205 | { |
1206 | uint64_t qaddr_base = xive_end_qaddr(end); |
1207 | uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); |
1208 | uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); |
1209 | uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); |
1210 | |
1211 | uint64_t qaddr = qaddr_base + (qindex << 2); |
1212 | uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); |
1213 | uint32_t qentries = 1 << (qsize + 10); |
1214 | |
1215 | if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) { |
1216 | qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" |
                      HWADDR_PRIx "\n", qaddr);
1218 | return; |
1219 | } |
1220 | |
1221 | qindex = (qindex + 1) & (qentries - 1); |
1222 | if (qindex == 0) { |
1223 | qgen ^= 1; |
1224 | end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen); |
1225 | } |
1226 | end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex); |
1227 | } |
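
/*
 * A quick sizing example for the geometry used above: the queue holds
 * 1 << (qsize + 10) four-byte entries, so qsize 0 describes a 4KB
 * queue of 1024 entries and qsize 4 a 64KB queue of 16384 entries.
 * The END_W1_GENERATION toggle flips on each wrap of qindex, which is
 * how the consumer distinguishes valid entries from stale ones.
 */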
1228 | |
1229 | void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, |
1230 | Monitor *mon) |
1231 | { |
1232 | XiveEAS *eas = (XiveEAS *) &end->w4; |
1233 | uint8_t pq; |
1234 | |
1235 | if (!xive_end_is_escalate(end)) { |
1236 | return; |
1237 | } |
1238 | |
1239 | pq = xive_get_field32(END_W1_ESe, end->w1); |
1240 | |
    monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
1242 | end_idx, |
1243 | pq & XIVE_ESB_VAL_P ? 'P' : '-', |
1244 | pq & XIVE_ESB_VAL_Q ? 'Q' : '-', |
1245 | xive_eas_is_valid(eas) ? 'V' : ' ', |
1246 | xive_eas_is_masked(eas) ? 'M' : ' ', |
1247 | (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), |
1248 | (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), |
1249 | (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); |
1250 | } |
1251 | |
1252 | /* |
1253 | * XIVE Router (aka. Virtualization Controller or IVRE) |
1254 | */ |
1255 | |
1256 | int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx, |
1257 | XiveEAS *eas) |
1258 | { |
1259 | XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); |
1260 | |
1261 | return xrc->get_eas(xrtr, eas_blk, eas_idx, eas); |
1262 | } |
1263 | |
1264 | int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, |
1265 | XiveEND *end) |
1266 | { |
1267 | XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); |
1268 | |
1269 | return xrc->get_end(xrtr, end_blk, end_idx, end); |
1270 | } |
1271 | |
1272 | int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx, |
1273 | XiveEND *end, uint8_t word_number) |
1274 | { |
1275 | XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); |
1276 | |
1277 | return xrc->write_end(xrtr, end_blk, end_idx, end, word_number); |
1278 | } |
1279 | |
1280 | int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, |
1281 | XiveNVT *nvt) |
1282 | { |
1283 | XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); |
1284 | |
1285 | return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt); |
1286 | } |
1287 | |
1288 | int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx, |
1289 | XiveNVT *nvt, uint8_t word_number) |
1290 | { |
1291 | XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); |
1292 | |
1293 | return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number); |
1294 | } |
1295 | |
1296 | XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs) |
1297 | { |
1298 | XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr); |
1299 | |
1300 | return xrc->get_tctx(xrtr, cs); |
1301 | } |
1302 | |
/*
 * Encode the HW CAM line in the block group mode format:
 *
 *   chip << 19 | 0000000 0 0001 thread (7 bits)
 */
1308 | static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx) |
1309 | { |
1310 | CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; |
1311 | uint32_t pir = env->spr_cb[SPR_PIR].default_value; |
1312 | |
1313 | return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f)); |
1314 | } |
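
/*
 * For example (a sketch, assuming xive_nvt_cam_line() packs the block
 * in the upper bits as pictured above): a PIR of 0x234 yields chip
 * 0x2 and thread bits 0x80 | 0x34 = 0xb4, hence a HW CAM line of
 * (0x2 << 19) | 0xb4 = 0x1000b4.
 */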
1315 | |
1316 | /* |
1317 | * The thread context register words are in big-endian format. |
1318 | */ |
1319 | static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format, |
1320 | uint8_t nvt_blk, uint32_t nvt_idx, |
1321 | bool cam_ignore, uint32_t logic_serv) |
1322 | { |
1323 | uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx); |
1324 | uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); |
1325 | uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); |
1326 | uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); |
1327 | uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); |
1328 | |
1329 | /* |
1330 | * TODO (PowerNV): ignore mode. The low order bits of the NVT |
1331 | * identifier are ignored in the "CAM" match. |
1332 | */ |
1333 | |
1334 | if (format == 0) { |
1335 | if (cam_ignore == true) { |
1336 | /* |
1337 | * F=0 & i=1: Logical server notification (bits ignored at |
1338 | * the end of the NVT identifier) |
1339 | */ |
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
1341 | nvt_blk, nvt_idx); |
1342 | return -1; |
1343 | } |
1344 | |
1345 | /* F=0 & i=0: Specific NVT notification */ |
1346 | |
1347 | /* PHYS ring */ |
1348 | if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) && |
1349 | cam == xive_tctx_hw_cam_line(tctx)) { |
1350 | return TM_QW3_HV_PHYS; |
1351 | } |
1352 | |
1353 | /* HV POOL ring */ |
1354 | if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) && |
1355 | cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) { |
1356 | return TM_QW2_HV_POOL; |
1357 | } |
1358 | |
1359 | /* OS ring */ |
1360 | if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && |
1361 | cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) { |
1362 | return TM_QW1_OS; |
1363 | } |
1364 | } else { |
1365 | /* F=1 : User level Event-Based Branch (EBB) notification */ |
1366 | |
1367 | /* USER ring */ |
1368 | if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) && |
1369 | (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) && |
1370 | (be32_to_cpu(qw0w2) & TM_QW0W2_VU) && |
1371 | (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) { |
1372 | return TM_QW0_USER; |
1373 | } |
1374 | } |
1375 | return -1; |
1376 | } |
1377 | |
1378 | typedef struct XiveTCTXMatch { |
1379 | XiveTCTX *tctx; |
1380 | uint8_t ring; |
1381 | } XiveTCTXMatch; |
1382 | |
1383 | static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format, |
1384 | uint8_t nvt_blk, uint32_t nvt_idx, |
1385 | bool cam_ignore, uint8_t priority, |
1386 | uint32_t logic_serv, XiveTCTXMatch *match) |
1387 | { |
1388 | CPUState *cs; |
1389 | |
1390 | /* |
1391 | * TODO (PowerNV): handle chip_id overwrite of block field for |
1392 | * hardwired CAM compares |
1393 | */ |
1394 | |
1395 | CPU_FOREACH(cs) { |
1396 | XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs); |
1397 | int ring; |
1398 | |
1399 | /* |
1400 | * HW checks that the CPU is enabled in the Physical Thread |
1401 | * Enable Register (PTER). |
1402 | */ |
1403 | |
1404 | /* |
1405 | * Check the thread context CAM lines and record matches. We |
1406 | * will handle CPU exception delivery later |
1407 | */ |
1408 | ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx, |
1409 | cam_ignore, logic_serv); |
1410 | /* |
1411 | * Save the context and follow on to catch duplicates, that we |
1412 | * don't support yet. |
1413 | */ |
1414 | if (ring != -1) { |
1415 | if (match->tctx) { |
1416 | qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread " |
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
1418 | return false; |
1419 | } |
1420 | |
1421 | match->ring = ring; |
1422 | match->tctx = tctx; |
1423 | } |
1424 | } |
1425 | |
1426 | if (!match->tctx) { |
        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
1428 | nvt_blk, nvt_idx); |
1429 | return false; |
1430 | } |
1431 | |
1432 | return true; |
1433 | } |
1434 | |
1435 | /* |
1436 | * This is our simple Xive Presenter Engine model. It is merged in the |
1437 | * Router as it does not require an extra object. |
1438 | * |
1439 | * It receives notification requests sent by the IVRE to find one |
1440 | * matching NVT (or more) dispatched on the processor threads. In case |
 * of a single NVT notification, the process is abbreviated and the
1442 | * thread is signaled if a match is found. In case of a logical server |
1443 | * notification (bits ignored at the end of the NVT identifier), the |
1444 | * IVPE and IVRE select a winning thread using different filters. This |
1445 | * involves 2 or 3 exchanges on the PowerBus that the model does not |
1446 | * support. |
1447 | * |
1448 | * The parameters represent what is sent on the PowerBus |
1449 | */ |
1450 | static bool xive_presenter_notify(XiveRouter *xrtr, uint8_t format, |
1451 | uint8_t nvt_blk, uint32_t nvt_idx, |
1452 | bool cam_ignore, uint8_t priority, |
1453 | uint32_t logic_serv) |
1454 | { |
1455 | XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; |
1456 | bool found; |
1457 | |
1458 | found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore, |
1459 | priority, logic_serv, &match); |
1460 | if (found) { |
1461 | ipb_update(&match.tctx->regs[match.ring], priority); |
1462 | xive_tctx_notify(match.tctx, match.ring); |
1463 | } |
1464 | |
1465 | return found; |
1466 | } |
1467 | |
1468 | /* |
1469 | * Notification using the END ESe/ESn bit (Event State Buffer for |
 * escalation and notification). Provide further coalescing in the
1471 | * Router. |
1472 | */ |
1473 | static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk, |
1474 | uint32_t end_idx, XiveEND *end, |
1475 | uint32_t end_esmask) |
1476 | { |
1477 | uint8_t pq = xive_get_field32(end_esmask, end->w1); |
1478 | bool notify = xive_esb_trigger(&pq); |
1479 | |
1480 | if (pq != xive_get_field32(end_esmask, end->w1)) { |
1481 | end->w1 = xive_set_field32(end_esmask, end->w1, pq); |
1482 | xive_router_write_end(xrtr, end_blk, end_idx, end, 1); |
1483 | } |
1484 | |
1485 | /* ESe/n[Q]=1 : end of notification */ |
1486 | return notify; |
1487 | } |
1488 | |
1489 | /* |
1490 | * An END trigger can come from an event trigger (IPI or HW) or from |
1491 | * another chip. We don't model the PowerBus but the END trigger |
 * message has the same parameters as the function below.
1493 | */ |
1494 | static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, |
1495 | uint32_t end_idx, uint32_t end_data) |
1496 | { |
1497 | XiveEND end; |
1498 | uint8_t priority; |
1499 | uint8_t format; |
1500 | uint8_t nvt_blk; |
1501 | uint32_t nvt_idx; |
1502 | XiveNVT nvt; |
1503 | bool found; |
1504 | |
1505 | /* END cache lookup */ |
1506 | if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) { |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1508 | end_idx); |
1509 | return; |
1510 | } |
1511 | |
1512 | if (!xive_end_is_valid(&end)) { |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1514 | end_blk, end_idx); |
1515 | return; |
1516 | } |
1517 | |
1518 | if (xive_end_is_enqueue(&end)) { |
1519 | xive_end_enqueue(&end, end_data); |
1520 | /* Enqueuing event data modifies the EQ toggle and index */ |
1521 | xive_router_write_end(xrtr, end_blk, end_idx, &end, 1); |
1522 | } |
1523 | |
1524 | /* |
1525 | * When the END is silent, we skip the notification part. |
1526 | */ |
1527 | if (xive_end_is_silent_escalation(&end)) { |
1528 | goto do_escalation; |
1529 | } |
1530 | |
1531 | /* |
1532 | * The W7 format depends on the F bit in W6. It defines the type |
1533 | * of the notification : |
1534 | * |
1535 | * F=0 : single or multiple NVT notification |
1536 | * F=1 : User level Event-Based Branch (EBB) notification, no |
1537 | * priority |
1538 | */ |
1539 | format = xive_get_field32(END_W6_FORMAT_BIT, end.w6); |
1540 | priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7); |
1541 | |
1542 | /* The END is masked */ |
1543 | if (format == 0 && priority == 0xff) { |
1544 | return; |
1545 | } |
1546 | |
1547 | /* |
1548 | * Check the END ESn (Event State Buffer for notification) for |
 * even further coalescing in the Router
1550 | */ |
1551 | if (!xive_end_is_notify(&end)) { |
1552 | /* ESn[Q]=1 : end of notification */ |
1553 | if (!xive_router_end_es_notify(xrtr, end_blk, end_idx, |
1554 | &end, END_W1_ESn)) { |
1555 | return; |
1556 | } |
1557 | } |
1558 | |
1559 | /* |
1560 | * Follows IVPE notification |
1561 | */ |
1562 | nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6); |
1563 | nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6); |
1564 | |
1565 | /* NVT cache lookup */ |
1566 | if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) { |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
1568 | nvt_blk, nvt_idx); |
1569 | return; |
1570 | } |
1571 | |
1572 | if (!xive_nvt_is_valid(&nvt)) { |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
1574 | nvt_blk, nvt_idx); |
1575 | return; |
1576 | } |
1577 | |
1578 | found = xive_presenter_notify(xrtr, format, nvt_blk, nvt_idx, |
1579 | xive_get_field32(END_W7_F0_IGNORE, end.w7), |
1580 | priority, |
1581 | xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7)); |
1582 | |
1583 | /* TODO: Auto EOI. */ |
1584 | |
1585 | if (found) { |
1586 | return; |
1587 | } |
1588 | |
1589 | /* |
1590 | * If no matching NVT is dispatched on a HW thread : |
1591 | * - specific VP: update the NVT structure if backlog is activated |
1592 | * - logical server : forward request to IVPE (not supported) |
1593 | */ |
1594 | if (xive_end_is_backlog(&end)) { |
1595 | if (format == 1) { |
1596 | qemu_log_mask(LOG_GUEST_ERROR, |
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
1598 | end_blk, end_idx); |
1599 | return; |
1600 | } |
1601 | /* Record the IPB in the associated NVT structure */ |
1602 | ipb_update((uint8_t *) &nvt.w4, priority); |
1603 | xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); |
1604 | |
1605 | /* |
1606 | * On HW, follows a "Broadcast Backlog" to IVPEs |
1607 | */ |
1608 | } |
1609 | |
1610 | do_escalation: |
1611 | /* |
1612 | * If activated, escalate notification using the ESe PQ bits and |
1613 | * the EAS in w4-5 |
1614 | */ |
1615 | if (!xive_end_is_escalate(&end)) { |
1616 | return; |
1617 | } |
1618 | |
1619 | /* |
1620 | * Check the END ESe (Event State Buffer for escalation) for even |
 * further coalescing in the Router
1622 | */ |
1623 | if (!xive_end_is_uncond_escalation(&end)) { |
1624 | /* ESe[Q]=1 : end of notification */ |
1625 | if (!xive_router_end_es_notify(xrtr, end_blk, end_idx, |
1626 | &end, END_W1_ESe)) { |
1627 | return; |
1628 | } |
1629 | } |
1630 | |
1631 | /* |
1632 | * The END trigger becomes an Escalation trigger |
1633 | */ |
1634 | xive_router_end_notify(xrtr, |
1635 | xive_get_field32(END_W4_ESC_END_BLOCK, end.w4), |
1636 | xive_get_field32(END_W4_ESC_END_INDEX, end.w4), |
1637 | xive_get_field32(END_W5_ESC_END_DATA, end.w5)); |
1638 | } |
1639 | |
1640 | void xive_router_notify(XiveNotifier *xn, uint32_t lisn) |
1641 | { |
1642 | XiveRouter *xrtr = XIVE_ROUTER(xn); |
1643 | uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn); |
1644 | uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn); |
1645 | XiveEAS eas; |
1646 | |
1647 | /* EAS cache lookup */ |
1648 | if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) { |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
1650 | return; |
1651 | } |
1652 | |
1653 | /* |
1654 | * The IVRE checks the State Bit Cache at this point. We skip the |
1655 | * SBC lookup because the state bits of the sources are modeled |
1656 | * internally in QEMU. |
1657 | */ |
1658 | |
1659 | if (!xive_eas_is_valid(&eas)) { |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
1661 | return; |
1662 | } |
1663 | |
1664 | if (xive_eas_is_masked(&eas)) { |
1665 | /* Notification completed */ |
1666 | return; |
1667 | } |
1668 | |
1669 | /* |
1670 | * The event trigger becomes an END trigger |
1671 | */ |
1672 | xive_router_end_notify(xrtr, |
1673 | xive_get_field64(EAS_END_BLOCK, eas.w), |
1674 | xive_get_field64(EAS_END_INDEX, eas.w), |
1675 | xive_get_field64(EAS_END_DATA, eas.w)); |
1676 | } |
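
/*
 * Putting the pieces together, a single event thus flows through the
 * following lookups before reaching a CPU:
 *
 *   trigger -> ESB PQ bits (xive_source_esb_trigger)
 *           -> EAS         (xive_router_notify, masking)
 *           -> END         (xive_router_end_notify, enqueue + ESn)
 *           -> NVT         (xive_presenter_notify, CAM match)
 *           -> TCTX        (ipb_update + xive_tctx_notify, CPU signal)
 */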
1677 | |
1678 | static void xive_router_class_init(ObjectClass *klass, void *data) |
1679 | { |
1680 | DeviceClass *dc = DEVICE_CLASS(klass); |
1681 | XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); |
1682 | |
    dc->desc = "XIVE Router Engine";
1684 | xnc->notify = xive_router_notify; |
1685 | } |
1686 | |
1687 | static const TypeInfo xive_router_info = { |
1688 | .name = TYPE_XIVE_ROUTER, |
1689 | .parent = TYPE_SYS_BUS_DEVICE, |
1690 | .abstract = true, |
1691 | .class_size = sizeof(XiveRouterClass), |
1692 | .class_init = xive_router_class_init, |
1693 | .interfaces = (InterfaceInfo[]) { |
1694 | { TYPE_XIVE_NOTIFIER }, |
1695 | { } |
1696 | } |
1697 | }; |
1698 | |
1699 | void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon) |
1700 | { |
1701 | if (!xive_eas_is_valid(eas)) { |
1702 | return; |
1703 | } |
1704 | |
    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
1707 | (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w), |
1708 | (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w), |
1709 | (uint32_t) xive_get_field64(EAS_END_DATA, eas->w)); |
1710 | } |
1711 | |
1712 | /* |
1713 | * END ESB MMIO loads |
1714 | */ |
1715 | static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size) |
1716 | { |
1717 | XiveENDSource *xsrc = XIVE_END_SOURCE(opaque); |
1718 | uint32_t offset = addr & 0xFFF; |
1719 | uint8_t end_blk; |
1720 | uint32_t end_idx; |
1721 | XiveEND end; |
1722 | uint32_t end_esmask; |
1723 | uint8_t pq; |
1724 | uint64_t ret = -1; |
1725 | |
1726 | end_blk = xsrc->block_id; |
1727 | end_idx = addr >> (xsrc->esb_shift + 1); |
1728 | |
1729 | if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) { |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
1731 | end_idx); |
1732 | return -1; |
1733 | } |
1734 | |
1735 | if (!xive_end_is_valid(&end)) { |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
1737 | end_blk, end_idx); |
1738 | return -1; |
1739 | } |
1740 | |
1741 | end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe; |
1742 | pq = xive_get_field32(end_esmask, end.w1); |
1743 | |
1744 | switch (offset) { |
1745 | case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF: |
1746 | ret = xive_esb_eoi(&pq); |
1747 | |
1748 | /* Forward the source event notification for routing ?? */ |
1749 | break; |
1750 | |
1751 | case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF: |
1752 | ret = pq; |
1753 | break; |
1754 | |
1755 | case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF: |
1756 | case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF: |
1757 | case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF: |
1758 | case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF: |
1759 | ret = xive_esb_set(&pq, (offset >> 8) & 0x3); |
1760 | break; |
1761 | default: |
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
1764 | return -1; |
1765 | } |
1766 | |
1767 | if (pq != xive_get_field32(end_esmask, end.w1)) { |
1768 | end.w1 = xive_set_field32(end_esmask, end.w1, pq); |
1769 | xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1); |
1770 | } |
1771 | |
1772 | return ret; |
1773 | } |
1774 | |
1775 | /* |
1776 | * END ESB MMIO stores are invalid |
1777 | */ |
1778 | static void xive_end_source_write(void *opaque, hwaddr addr, |
1779 | uint64_t value, unsigned size) |
1780 | { |
1781 | qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%" |
                  HWADDR_PRIx"\n", addr);
1783 | } |
1784 | |
1785 | static const MemoryRegionOps xive_end_source_ops = { |
1786 | .read = xive_end_source_read, |
1787 | .write = xive_end_source_write, |
1788 | .endianness = DEVICE_BIG_ENDIAN, |
1789 | .valid = { |
1790 | .min_access_size = 8, |
1791 | .max_access_size = 8, |
1792 | }, |
1793 | .impl = { |
1794 | .min_access_size = 8, |
1795 | .max_access_size = 8, |
1796 | }, |
1797 | }; |
1798 | |
1799 | static void xive_end_source_realize(DeviceState *dev, Error **errp) |
1800 | { |
1801 | XiveENDSource *xsrc = XIVE_END_SOURCE(dev); |
1802 | Object *obj; |
1803 | Error *local_err = NULL; |
1804 | |
    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
1809 | return; |
1810 | } |
1811 | |
1812 | xsrc->xrtr = XIVE_ROUTER(obj); |
1813 | |
1814 | if (!xsrc->nr_ends) { |
        error_setg(errp, "Number of ENDs needs to be greater than 0");
1816 | return; |
1817 | } |
1818 | |
1819 | if (xsrc->esb_shift != XIVE_ESB_4K && |
1820 | xsrc->esb_shift != XIVE_ESB_64K) { |
        error_setg(errp, "Invalid ESB shift setting");
1822 | return; |
1823 | } |
1824 | |
1825 | /* |
1826 | * Each END is assigned an even/odd pair of MMIO pages, the even page |
1827 | * manages the ESn field while the odd page manages the ESe field. |
1828 | */ |
1829 | memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), |
                          &xive_end_source_ops, xsrc, "xive.end",
1831 | (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends); |
1832 | } |
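
/*
 * Sizing note for the region above: each END consumes an even/odd
 * page pair, so with the default 64K shift a hypothetical nr_ends of
 * 256 spans 256 * 2 * 64KB = 32MB of ESB MMIO space.
 */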
1833 | |
1834 | static Property xive_end_source_properties[] = { |
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
1838 | DEFINE_PROP_END_OF_LIST(), |
1839 | }; |
1840 | |
1841 | static void xive_end_source_class_init(ObjectClass *klass, void *data) |
1842 | { |
1843 | DeviceClass *dc = DEVICE_CLASS(klass); |
1844 | |
    dc->desc = "XIVE END Source";
1846 | dc->props = xive_end_source_properties; |
1847 | dc->realize = xive_end_source_realize; |
1848 | } |
1849 | |
1850 | static const TypeInfo xive_end_source_info = { |
1851 | .name = TYPE_XIVE_END_SOURCE, |
1852 | .parent = TYPE_DEVICE, |
1853 | .instance_size = sizeof(XiveENDSource), |
1854 | .class_init = xive_end_source_class_init, |
1855 | }; |
1856 | |
1857 | /* |
1858 | * XIVE Notifier |
1859 | */ |
1860 | static const TypeInfo xive_notifier_info = { |
1861 | .name = TYPE_XIVE_NOTIFIER, |
1862 | .parent = TYPE_INTERFACE, |
1863 | .class_size = sizeof(XiveNotifierClass), |
1864 | }; |
1865 | |
1866 | static void xive_register_types(void) |
1867 | { |
1868 | type_register_static(&xive_source_info); |
1869 | type_register_static(&xive_notifier_info); |
1870 | type_register_static(&xive_router_info); |
1871 | type_register_static(&xive_end_source_info); |
1872 | type_register_static(&xive_tctx_info); |
1873 | } |
1874 | |
1875 | type_init(xive_register_types) |
1876 | |