1 | /* |
2 | * QEMU PowerPC XIVE interrupt controller model |
3 | * |
4 | * Copyright (c) 2017-2019, IBM Corporation. |
5 | * |
6 | * This code is licensed under the GPL version 2 or later. See the |
7 | * COPYING file in the top-level directory. |
8 | */ |
9 | |
10 | #include "qemu/osdep.h" |
11 | #include "qemu/log.h" |
12 | #include "qemu/module.h" |
13 | #include "qapi/error.h" |
14 | #include "target/ppc/cpu.h" |
15 | #include "sysemu/cpus.h" |
16 | #include "sysemu/dma.h" |
17 | #include "sysemu/reset.h" |
18 | #include "monitor/monitor.h" |
19 | #include "hw/ppc/fdt.h" |
20 | #include "hw/ppc/pnv.h" |
21 | #include "hw/ppc/pnv_core.h" |
22 | #include "hw/ppc/pnv_xscom.h" |
23 | #include "hw/ppc/pnv_xive.h" |
24 | #include "hw/ppc/xive_regs.h" |
25 | #include "hw/qdev-properties.h" |
26 | #include "hw/ppc/ppc.h" |
27 | |
28 | #include <libfdt.h> |
29 | |
30 | #include "pnv_xive_regs.h" |
31 | |
32 | #define XIVE_DEBUG |
33 | |
34 | /* |
35 | * Virtual structures table (VST) |
36 | */ |
37 | #define SBE_PER_BYTE 4 |
38 | |
39 | typedef struct XiveVstInfo { |
40 | const char *name; |
41 | uint32_t size; |
42 | uint32_t max_blocks; |
43 | } XiveVstInfo; |
44 | |
45 | static const XiveVstInfo vst_infos[] = { |
46 | [VST_TSEL_IVT] = { "EAT" , sizeof(XiveEAS), 16 }, |
47 | [VST_TSEL_SBE] = { "SBE" , 1, 16 }, |
48 | [VST_TSEL_EQDT] = { "ENDT" , sizeof(XiveEND), 16 }, |
49 | [VST_TSEL_VPDT] = { "VPDT" , sizeof(XiveNVT), 32 }, |
50 | |
51 | /* |
52 | * Interrupt fifo backing store table (not modeled) : |
53 | * |
54 | * 0 - IPI, |
55 | * 1 - HWD, |
56 | * 2 - First escalate, |
57 | * 3 - Second escalate, |
58 | * 4 - Redistribution, |
59 | * 5 - IPI cascaded queue ? |
60 | */ |
61 | [VST_TSEL_IRQ] = { "IRQ" , 1, 6 }, |
62 | }; |
63 | |
#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__)
67 | |
68 | /* |
69 | * QEMU version of the GETFIELD/SETFIELD macros |
70 | * |
71 | * TODO: It might be better to use the existing extract64() and |
72 | * deposit64() but this means that all the register definitions will |
73 | * change and become incompatible with the ones found in skiboot. |
74 | * |
75 | * Keep it as it is for now until we find a common ground. |
76 | */ |
77 | static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) |
78 | { |
79 | return (word & mask) >> ctz64(mask); |
80 | } |
81 | |
82 | static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, |
83 | uint64_t value) |
84 | { |
85 | return (word & ~mask) | ((value << ctz64(mask)) & mask); |
86 | } |
87 | |
88 | /* |
89 | * Remote access to controllers. HW uses MMIOs. For now, a simple scan |
90 | * of the chips is good enough. |
91 | * |
92 | * TODO: Block scope support |
93 | */ |
94 | static PnvXive *pnv_xive_get_ic(uint8_t blk) |
95 | { |
96 | PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); |
97 | int i; |
98 | |
99 | for (i = 0; i < pnv->num_chips; i++) { |
100 | Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); |
101 | PnvXive *xive = &chip9->xive; |
102 | |
103 | if (xive->chip->chip_id == blk) { |
104 | return xive; |
105 | } |
106 | } |
107 | return NULL; |
108 | } |
109 | |
110 | /* |
111 | * VST accessors for SBE, EAT, ENDT, NVT |
112 | * |
 * Indirect VST tables are arrays of VSDs, each pointing to a page of
 * the same size. Each page is a direct VST table.
115 | */ |
116 | |
117 | #define XIVE_VSD_SIZE 8 |
118 | |
119 | /* Indirect page size can be 4K, 64K, 2M, 16M. */ |
120 | static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift) |
121 | { |
122 | return page_shift == 12 || page_shift == 16 || |
123 | page_shift == 21 || page_shift == 24; |
124 | } |
125 | |
126 | static uint64_t pnv_xive_vst_size(uint64_t vsd) |
127 | { |
128 | uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); |
129 | |
130 | /* |
131 | * Read the first descriptor to get the page size of the indirect |
132 | * table. |
133 | */ |
134 | if (VSD_INDIRECT & vsd) { |
135 | uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE; |
136 | uint32_t page_shift; |
137 | |
138 | vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK); |
139 | page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; |
140 | |
141 | if (!pnv_xive_vst_page_size_allowed(page_shift)) { |
142 | return 0; |
143 | } |
144 | |
145 | return nr_pages * (1ull << page_shift); |
146 | } |
147 | |
148 | return vst_tsize; |
149 | } |
150 | |
151 | static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type, |
152 | uint64_t vsd, uint32_t idx) |
153 | { |
154 | const XiveVstInfo *info = &vst_infos[type]; |
155 | uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; |
156 | |
157 | return vst_addr + idx * info->size; |
158 | } |
159 | |
160 | static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, |
161 | uint64_t vsd, uint32_t idx) |
162 | { |
163 | const XiveVstInfo *info = &vst_infos[type]; |
164 | uint64_t vsd_addr; |
165 | uint32_t vsd_idx; |
166 | uint32_t page_shift; |
167 | uint32_t vst_per_page; |
168 | |
169 | /* Get the page size of the indirect table. */ |
170 | vsd_addr = vsd & VSD_ADDRESS_MASK; |
171 | vsd = ldq_be_dma(&address_space_memory, vsd_addr); |
172 | |
173 | if (!(vsd & VSD_ADDRESS_MASK)) { |
174 | xive_error(xive, "VST: invalid %s entry %x !?" , info->name, idx); |
175 | return 0; |
176 | } |
177 | |
178 | page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; |
179 | |
180 | if (!pnv_xive_vst_page_size_allowed(page_shift)) { |
181 | xive_error(xive, "VST: invalid %s page shift %d" , info->name, |
182 | page_shift); |
183 | return 0; |
184 | } |
185 | |
186 | vst_per_page = (1ull << page_shift) / info->size; |
187 | vsd_idx = idx / vst_per_page; |
188 | |
189 | /* Load the VSD we are looking for, if not already done */ |
190 | if (vsd_idx) { |
191 | vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; |
192 | vsd = ldq_be_dma(&address_space_memory, vsd_addr); |
193 | |
194 | if (!(vsd & VSD_ADDRESS_MASK)) { |
195 | xive_error(xive, "VST: invalid %s entry %x !?" , info->name, idx); |
196 | return 0; |
197 | } |
198 | |
199 | /* |
200 | * Check that the pages have a consistent size across the |
201 | * indirect table |
202 | */ |
203 | if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { |
204 | xive_error(xive, "VST: %s entry %x indirect page size differ !?" , |
205 | info->name, idx); |
206 | return 0; |
207 | } |
208 | } |
209 | |
210 | return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); |
211 | } |
212 | |
213 | static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk, |
214 | uint32_t idx) |
215 | { |
216 | const XiveVstInfo *info = &vst_infos[type]; |
217 | uint64_t vsd; |
218 | uint32_t idx_max; |
219 | |
220 | if (blk >= info->max_blocks) { |
221 | xive_error(xive, "VST: invalid block id %d for VST %s %d !?" , |
222 | blk, info->name, idx); |
223 | return 0; |
224 | } |
225 | |
226 | vsd = xive->vsds[type][blk]; |
227 | |
228 | /* Remote VST access */ |
229 | if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { |
230 | xive = pnv_xive_get_ic(blk); |
231 | |
232 | return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0; |
233 | } |
234 | |
235 | idx_max = pnv_xive_vst_size(vsd) / info->size - 1; |
236 | if (idx > idx_max) { |
237 | #ifdef XIVE_DEBUG |
238 | xive_error(xive, "VST: %s entry %x/%x out of range [ 0 .. %x ] !?" , |
239 | info->name, blk, idx, idx_max); |
240 | #endif |
241 | return 0; |
242 | } |
243 | |
244 | if (VSD_INDIRECT & vsd) { |
245 | return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); |
246 | } |
247 | |
248 | return pnv_xive_vst_addr_direct(xive, type, vsd, idx); |
249 | } |
250 | |
251 | static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, |
252 | uint32_t idx, void *data) |
253 | { |
254 | const XiveVstInfo *info = &vst_infos[type]; |
255 | uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); |
256 | |
257 | if (!addr) { |
258 | return -1; |
259 | } |
260 | |
261 | cpu_physical_memory_read(addr, data, info->size); |
262 | return 0; |
263 | } |
264 | |
265 | #define XIVE_VST_WORD_ALL -1 |
266 | |
267 | static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, |
268 | uint32_t idx, void *data, uint32_t word_number) |
269 | { |
270 | const XiveVstInfo *info = &vst_infos[type]; |
271 | uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); |
272 | |
273 | if (!addr) { |
274 | return -1; |
275 | } |
276 | |
277 | if (word_number == XIVE_VST_WORD_ALL) { |
278 | cpu_physical_memory_write(addr, data, info->size); |
279 | } else { |
280 | cpu_physical_memory_write(addr + word_number * 4, |
281 | data + word_number * 4, 4); |
282 | } |
283 | return 0; |
284 | } |
285 | |
286 | static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, |
287 | XiveEND *end) |
288 | { |
289 | return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); |
290 | } |
291 | |
292 | static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, |
293 | XiveEND *end, uint8_t word_number) |
294 | { |
295 | return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, |
296 | word_number); |
297 | } |
298 | |
299 | static int pnv_xive_end_update(PnvXive *xive) |
300 | { |
301 | uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, |
302 | xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); |
303 | uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, |
304 | xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); |
305 | int i; |
306 | uint64_t eqc_watch[4]; |
307 | |
308 | for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { |
309 | eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); |
310 | } |
311 | |
312 | return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, |
313 | XIVE_VST_WORD_ALL); |
314 | } |
315 | |
316 | static void pnv_xive_end_cache_load(PnvXive *xive) |
317 | { |
318 | uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID, |
319 | xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); |
320 | uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET, |
321 | xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]); |
322 | uint64_t eqc_watch[4] = { 0 }; |
323 | int i; |
324 | |
325 | if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) { |
326 | xive_error(xive, "VST: no END entry %x/%x !?" , blk, idx); |
327 | } |
328 | |
329 | for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { |
330 | xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]); |
331 | } |
332 | } |
333 | |
334 | static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, |
335 | XiveNVT *nvt) |
336 | { |
337 | return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); |
338 | } |
339 | |
340 | static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, |
341 | XiveNVT *nvt, uint8_t word_number) |
342 | { |
343 | return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, |
344 | word_number); |
345 | } |
346 | |
347 | static int pnv_xive_nvt_update(PnvXive *xive) |
348 | { |
349 | uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, |
350 | xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); |
351 | uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, |
352 | xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); |
353 | int i; |
354 | uint64_t vpc_watch[8]; |
355 | |
356 | for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { |
357 | vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); |
358 | } |
359 | |
360 | return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch, |
361 | XIVE_VST_WORD_ALL); |
362 | } |
363 | |
364 | static void pnv_xive_nvt_cache_load(PnvXive *xive) |
365 | { |
366 | uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID, |
367 | xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); |
368 | uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET, |
369 | xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]); |
370 | uint64_t vpc_watch[8] = { 0 }; |
371 | int i; |
372 | |
373 | if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) { |
374 | xive_error(xive, "VST: no NVT entry %x/%x !?" , blk, idx); |
375 | } |
376 | |
377 | for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { |
378 | xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]); |
379 | } |
380 | } |
381 | |
382 | static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, |
383 | XiveEAS *eas) |
384 | { |
385 | PnvXive *xive = PNV_XIVE(xrtr); |
386 | |
387 | if (pnv_xive_get_ic(blk) != xive) { |
388 | xive_error(xive, "VST: EAS %x is remote !?" , XIVE_SRCNO(blk, idx)); |
389 | return -1; |
390 | } |
391 | |
392 | return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); |
393 | } |
394 | |
395 | static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs) |
396 | { |
397 | PowerPCCPU *cpu = POWERPC_CPU(cs); |
398 | XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); |
399 | PnvXive *xive = NULL; |
400 | CPUPPCState *env = &cpu->env; |
401 | int pir = env->spr_cb[SPR_PIR].default_value; |
402 | |
403 | /* |
404 | * Perform an extra check on the HW thread enablement. |
405 | * |
     * The TIMA is shared among the chips. To identify the chip from
     * which the access is being done, we extract the chip id from
     * the PIR.
409 | */ |
410 | xive = pnv_xive_get_ic((pir >> 8) & 0xf); |
411 | if (!xive) { |
412 | return NULL; |
413 | } |
414 | |
415 | if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { |
        xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir);
417 | } |
418 | |
419 | return tctx; |
420 | } |
421 | |
422 | /* |
423 | * The internal sources (IPIs) of the interrupt controller have no |
424 | * knowledge of the XIVE chip on which they reside. Encode the block |
425 | * id in the source interrupt number before forwarding the source |
426 | * event notification to the Router. This is required on a multichip |
427 | * system. |
428 | */ |
429 | static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) |
430 | { |
431 | PnvXive *xive = PNV_XIVE(xn); |
432 | uint8_t blk = xive->chip->chip_id; |
433 | |
434 | xive_router_notify(xn, XIVE_SRCNO(blk, srcno)); |
435 | } |
436 | |
437 | /* |
438 | * XIVE helpers |
439 | */ |
440 | |
441 | static uint64_t pnv_xive_vc_size(PnvXive *xive) |
442 | { |
443 | return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; |
444 | } |
445 | |
446 | static uint64_t pnv_xive_edt_shift(PnvXive *xive) |
447 | { |
448 | return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); |
449 | } |
450 | |
451 | static uint64_t pnv_xive_pc_size(PnvXive *xive) |
452 | { |
453 | return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; |
454 | } |
455 | |
456 | static uint32_t pnv_xive_nr_ipis(PnvXive *xive) |
457 | { |
458 | uint8_t blk = xive->chip->chip_id; |
459 | |
460 | return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE; |
461 | } |
462 | |
463 | static uint32_t pnv_xive_nr_ends(PnvXive *xive) |
464 | { |
465 | uint8_t blk = xive->chip->chip_id; |
466 | |
467 | return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk]) |
468 | / vst_infos[VST_TSEL_EQDT].size; |
469 | } |
470 | |
471 | /* |
472 | * EDT Table |
473 | * |
474 | * The Virtualization Controller MMIO region containing the IPI ESB |
475 | * pages and END ESB pages is sub-divided into "sets" which map |
476 | * portions of the VC region to the different ESB pages. It is |
477 | * configured at runtime through the EDT "Domain Table" to let the |
478 | * firmware decide how to split the VC address space between IPI ESB |
479 | * pages and END ESB pages. |
480 | */ |
481 | |
482 | /* |
483 | * Computes the overall size of the IPI or the END ESB pages |
484 | */ |
485 | static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) |
486 | { |
487 | uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); |
488 | uint64_t size = 0; |
489 | int i; |
490 | |
491 | for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { |
492 | uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); |
493 | |
494 | if (edt_type == type) { |
495 | size += edt_size; |
496 | } |
497 | } |
498 | |
499 | return size; |
500 | } |
501 | |
502 | /* |
 * Maps an offset of the VC region onto the IPI or END region using
 * the layout defined by the EDT "Domain Table"
505 | */ |
506 | static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, |
507 | uint64_t type) |
508 | { |
509 | int i; |
510 | uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); |
511 | uint64_t edt_offset = vc_offset; |
512 | |
513 | for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { |
514 | uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); |
515 | |
516 | if (edt_type != type) { |
517 | edt_offset -= edt_size; |
518 | } |
519 | } |
520 | |
521 | return edt_offset; |
522 | } |
523 | |
524 | static void pnv_xive_edt_resize(PnvXive *xive) |
525 | { |
526 | uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); |
527 | uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); |
528 | |
529 | memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); |
530 | memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); |
531 | |
532 | memory_region_set_size(&xive->end_edt_mmio, end_edt_size); |
533 | memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); |
534 | } |
535 | |
536 | /* |
537 | * XIVE Table configuration. Only EDT is supported. |
538 | */ |
539 | static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val) |
540 | { |
541 | uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL; |
542 | uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]); |
543 | uint64_t *xive_table; |
544 | uint8_t max_index; |
545 | |
546 | switch (tsel) { |
547 | case CQ_TAR_TSEL_BLK: |
548 | max_index = ARRAY_SIZE(xive->blk); |
549 | xive_table = xive->blk; |
550 | break; |
551 | case CQ_TAR_TSEL_MIG: |
552 | max_index = ARRAY_SIZE(xive->mig); |
553 | xive_table = xive->mig; |
554 | break; |
555 | case CQ_TAR_TSEL_EDT: |
556 | max_index = ARRAY_SIZE(xive->edt); |
557 | xive_table = xive->edt; |
558 | break; |
559 | case CQ_TAR_TSEL_VDT: |
560 | max_index = ARRAY_SIZE(xive->vdt); |
561 | xive_table = xive->vdt; |
562 | break; |
563 | default: |
564 | xive_error(xive, "IC: invalid table %d" , (int) tsel); |
565 | return -1; |
566 | } |
567 | |
568 | if (tsel_index >= max_index) { |
569 | xive_error(xive, "IC: invalid index %d" , (int) tsel_index); |
570 | return -1; |
571 | } |
572 | |
573 | xive_table[tsel_index] = val; |
574 | |
575 | if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) { |
576 | xive->regs[CQ_TAR >> 3] = |
577 | SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index); |
578 | } |
579 | |
580 | /* |
581 | * EDT configuration is complete. Resize the MMIO windows exposing |
582 | * the IPI and the END ESBs in the VC region. |
583 | */ |
584 | if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) { |
585 | pnv_xive_edt_resize(xive); |
586 | } |
587 | |
588 | return 0; |
589 | } |
590 | |
591 | /* |
592 | * Virtual Structure Tables (VST) configuration |
593 | */ |
594 | static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type, |
595 | uint8_t blk, uint64_t vsd) |
596 | { |
597 | XiveENDSource *end_xsrc = &xive->end_source; |
598 | XiveSource *xsrc = &xive->ipi_source; |
599 | const XiveVstInfo *info = &vst_infos[type]; |
600 | uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; |
601 | uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; |
602 | |
603 | /* Basic checks */ |
604 | |
605 | if (VSD_INDIRECT & vsd) { |
606 | if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) { |
607 | xive_error(xive, "VST: %s indirect tables are not enabled" , |
608 | info->name); |
609 | return; |
610 | } |
611 | |
612 | if (!pnv_xive_vst_page_size_allowed(page_shift)) { |
613 | xive_error(xive, "VST: invalid %s page shift %d" , info->name, |
614 | page_shift); |
615 | return; |
616 | } |
617 | } |
618 | |
619 | if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) { |
620 | xive_error(xive, "VST: %s table address 0x%" PRIx64" is not aligned with" |
621 | " page shift %d" , info->name, vst_addr, page_shift); |
622 | return; |
623 | } |
624 | |
625 | /* Record the table configuration (in SRAM on HW) */ |
626 | xive->vsds[type][blk] = vsd; |
627 | |
628 | /* Now tune the models with the configuration provided by the FW */ |
629 | |
630 | switch (type) { |
631 | case VST_TSEL_IVT: /* Nothing to be done */ |
632 | break; |
633 | |
634 | case VST_TSEL_EQDT: |
635 | /* |
636 | * Backing store pages for the END. Compute the number of ENDs |
637 | * provisioned by FW and resize the END ESB window accordingly. |
638 | */ |
639 | memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) * |
640 | (1ull << (end_xsrc->esb_shift + 1))); |
641 | memory_region_add_subregion(&xive->end_edt_mmio, 0, |
642 | &end_xsrc->esb_mmio); |
643 | break; |
644 | |
645 | case VST_TSEL_SBE: |
646 | /* |
647 | * Backing store pages for the source PQ bits. The model does |
648 | * not use these PQ bits backed in RAM because the XiveSource |
649 | * model has its own. Compute the number of IRQs provisioned |
650 | * by FW and resize the IPI ESB window accordingly. |
651 | */ |
652 | memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) * |
653 | (1ull << xsrc->esb_shift)); |
654 | memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio); |
655 | break; |
656 | |
657 | case VST_TSEL_VPDT: /* Not modeled */ |
658 | case VST_TSEL_IRQ: /* Not modeled */ |
659 | /* |
         * These tables contain the backing store pages for the
661 | * interrupt fifos of the VC sub-engine in case of overflow. |
662 | */ |
663 | break; |
664 | |
665 | default: |
666 | g_assert_not_reached(); |
667 | } |
668 | } |
669 | |
670 | /* |
 * Both the PC and VC sub-engines are configured, as each makes use
 * of the Virtual Structure Tables: SBE, EAS, END and NVT.
673 | */ |
674 | static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine) |
675 | { |
676 | uint8_t mode = GETFIELD(VSD_MODE, vsd); |
677 | uint8_t type = GETFIELD(VST_TABLE_SELECT, |
678 | xive->regs[VC_VSD_TABLE_ADDR >> 3]); |
679 | uint8_t blk = GETFIELD(VST_TABLE_BLOCK, |
680 | xive->regs[VC_VSD_TABLE_ADDR >> 3]); |
681 | uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; |
682 | |
683 | if (type > VST_TSEL_IRQ) { |
684 | xive_error(xive, "VST: invalid table type %d" , type); |
685 | return; |
686 | } |
687 | |
688 | if (blk >= vst_infos[type].max_blocks) { |
689 | xive_error(xive, "VST: invalid block id %d for" |
690 | " %s table" , blk, vst_infos[type].name); |
691 | return; |
692 | } |
693 | |
694 | /* |
695 | * Only take the VC sub-engine configuration into account because |
696 | * the XiveRouter model combines both VC and PC sub-engines |
697 | */ |
698 | if (pc_engine) { |
699 | return; |
700 | } |
701 | |
702 | if (!vst_addr) { |
703 | xive_error(xive, "VST: invalid %s table address" , vst_infos[type].name); |
704 | return; |
705 | } |
706 | |
707 | switch (mode) { |
708 | case VSD_MODE_FORWARD: |
709 | xive->vsds[type][blk] = vsd; |
710 | break; |
711 | |
712 | case VSD_MODE_EXCLUSIVE: |
713 | pnv_xive_vst_set_exclusive(xive, type, blk, vsd); |
714 | break; |
715 | |
716 | default: |
717 | xive_error(xive, "VST: unsupported table mode %d" , mode); |
718 | return; |
719 | } |
720 | } |
721 | |
722 | /* |
 * Interrupt controller MMIO region. The layout is compatible between
 * 4K and 64K pages:
725 | * |
726 | * Page 0 sub-engine BARs |
727 | * 0x000 - 0x3FF IC registers |
728 | * 0x400 - 0x7FF PC registers |
729 | * 0x800 - 0xFFF VC registers |
730 | * |
731 | * Page 1 Notify page (writes only) |
732 | * 0x000 - 0x7FF HW interrupt triggers (PSI, PHB) |
733 | * 0x800 - 0xFFF forwards and syncs |
734 | * |
735 | * Page 2 LSI Trigger page (writes only) (not modeled) |
736 | * Page 3 LSI SB EOI page (reads only) (not modeled) |
737 | * |
738 | * Page 4-7 indirect TIMA |
739 | */ |
740 | |
741 | /* |
742 | * IC - registers MMIO |
743 | */ |
744 | static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset, |
745 | uint64_t val, unsigned size) |
746 | { |
747 | PnvXive *xive = PNV_XIVE(opaque); |
748 | MemoryRegion *sysmem = get_system_memory(); |
749 | uint32_t reg = offset >> 3; |
750 | bool is_chip0 = xive->chip->chip_id == 0; |
751 | |
752 | switch (offset) { |
753 | |
754 | /* |
755 | * XIVE CQ (PowerBus bridge) settings |
756 | */ |
757 | case CQ_MSGSND: /* msgsnd for doorbells */ |
758 | case CQ_FIRMASK_OR: /* FIR error reporting */ |
759 | break; |
760 | case CQ_PBI_CTL: |
761 | if (val & CQ_PBI_PC_64K) { |
762 | xive->pc_shift = 16; |
763 | } |
764 | if (val & CQ_PBI_VC_64K) { |
765 | xive->vc_shift = 16; |
766 | } |
767 | break; |
768 | case CQ_CFG_PB_GEN: /* PowerBus General Configuration */ |
769 | /* |
770 | * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode |
771 | */ |
772 | break; |
773 | |
774 | /* |
775 | * XIVE Virtualization Controller settings |
776 | */ |
777 | case VC_GLOBAL_CONFIG: |
778 | break; |
779 | |
780 | /* |
781 | * XIVE Presenter Controller settings |
782 | */ |
783 | case PC_GLOBAL_CONFIG: |
784 | /* |
785 | * PC_GCONF_CHIPID_OVR |
786 | * Overrides Int command Chip ID with the Chip ID field (DEBUG) |
787 | */ |
788 | break; |
789 | case PC_TCTXT_CFG: |
790 | /* |
791 | * TODO: block group support |
792 | * |
793 | * PC_TCTXT_CFG_BLKGRP_EN |
794 | * PC_TCTXT_CFG_HARD_CHIPID_BLK : |
795 | * Moves the chipid into block field for hardwired CAM compares. |
796 | * Block offset value is adjusted to 0b0..01 & ThrdId |
797 | * |
798 | * Will require changes in xive_presenter_tctx_match(). I am |
799 | * not sure how to handle that yet. |
800 | */ |
801 | |
802 | /* Overrides hardwired chip ID with the chip ID field */ |
803 | if (val & PC_TCTXT_CHIPID_OVERRIDE) { |
804 | xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val); |
805 | } |
806 | break; |
807 | case PC_TCTXT_TRACK: |
808 | /* |
809 | * PC_TCTXT_TRACK_EN: |
810 | * enable block tracking and exchange of block ownership |
811 | * information between Interrupt controllers |
812 | */ |
813 | break; |
814 | |
815 | /* |
816 | * Misc settings |
817 | */ |
818 | case VC_SBC_CONFIG: /* Store EOI configuration */ |
819 | /* |
         * Configure store EOI if required by firmware (skiboot has
         * recently removed support though)
822 | */ |
823 | if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) { |
824 | xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI; |
825 | } |
826 | break; |
827 | |
828 | case VC_EQC_CONFIG: /* TODO: silent escalation */ |
829 | case VC_AIB_TX_ORDER_TAG2: /* relax ordering */ |
830 | break; |
831 | |
832 | /* |
833 | * XIVE BAR settings (XSCOM only) |
834 | */ |
835 | case CQ_RST_CTL: |
836 | /* bit4: resets all BAR registers */ |
837 | break; |
838 | |
839 | case CQ_IC_BAR: /* IC BAR. 8 pages */ |
840 | xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12; |
841 | if (!(val & CQ_IC_BAR_VALID)) { |
842 | xive->ic_base = 0; |
843 | if (xive->regs[reg] & CQ_IC_BAR_VALID) { |
844 | memory_region_del_subregion(&xive->ic_mmio, |
845 | &xive->ic_reg_mmio); |
846 | memory_region_del_subregion(&xive->ic_mmio, |
847 | &xive->ic_notify_mmio); |
848 | memory_region_del_subregion(&xive->ic_mmio, |
849 | &xive->ic_lsi_mmio); |
850 | memory_region_del_subregion(&xive->ic_mmio, |
851 | &xive->tm_indirect_mmio); |
852 | |
853 | memory_region_del_subregion(sysmem, &xive->ic_mmio); |
854 | } |
855 | } else { |
856 | xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K); |
857 | if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) { |
858 | memory_region_add_subregion(sysmem, xive->ic_base, |
859 | &xive->ic_mmio); |
860 | |
861 | memory_region_add_subregion(&xive->ic_mmio, 0, |
862 | &xive->ic_reg_mmio); |
863 | memory_region_add_subregion(&xive->ic_mmio, |
864 | 1ul << xive->ic_shift, |
865 | &xive->ic_notify_mmio); |
866 | memory_region_add_subregion(&xive->ic_mmio, |
867 | 2ul << xive->ic_shift, |
868 | &xive->ic_lsi_mmio); |
869 | memory_region_add_subregion(&xive->ic_mmio, |
870 | 4ull << xive->ic_shift, |
871 | &xive->tm_indirect_mmio); |
872 | } |
873 | } |
874 | break; |
875 | |
876 | case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */ |
877 | case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */ |
878 | xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12; |
879 | if (!(val & CQ_TM_BAR_VALID)) { |
880 | xive->tm_base = 0; |
881 | if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) { |
882 | memory_region_del_subregion(sysmem, &xive->tm_mmio); |
883 | } |
884 | } else { |
885 | xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K); |
886 | if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) { |
887 | memory_region_add_subregion(sysmem, xive->tm_base, |
888 | &xive->tm_mmio); |
889 | } |
890 | } |
891 | break; |
892 | |
893 | case CQ_PC_BARM: |
894 | xive->regs[reg] = val; |
895 | memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive)); |
896 | break; |
897 | case CQ_PC_BAR: /* From 32M to 512G */ |
898 | if (!(val & CQ_PC_BAR_VALID)) { |
899 | xive->pc_base = 0; |
900 | if (xive->regs[reg] & CQ_PC_BAR_VALID) { |
901 | memory_region_del_subregion(sysmem, &xive->pc_mmio); |
902 | } |
903 | } else { |
904 | xive->pc_base = val & ~(CQ_PC_BAR_VALID); |
905 | if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) { |
906 | memory_region_add_subregion(sysmem, xive->pc_base, |
907 | &xive->pc_mmio); |
908 | } |
909 | } |
910 | break; |
911 | |
912 | case CQ_VC_BARM: |
913 | xive->regs[reg] = val; |
914 | memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive)); |
915 | break; |
916 | case CQ_VC_BAR: /* From 64M to 4TB */ |
917 | if (!(val & CQ_VC_BAR_VALID)) { |
918 | xive->vc_base = 0; |
919 | if (xive->regs[reg] & CQ_VC_BAR_VALID) { |
920 | memory_region_del_subregion(sysmem, &xive->vc_mmio); |
921 | } |
922 | } else { |
923 | xive->vc_base = val & ~(CQ_VC_BAR_VALID); |
924 | if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) { |
925 | memory_region_add_subregion(sysmem, xive->vc_base, |
926 | &xive->vc_mmio); |
927 | } |
928 | } |
929 | break; |
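
    /*
     * A typical FW BAR sequence (a sketch): program the size mask
     * first, e.g. CQ_VC_BARM <- ~(64G - 1), so that the region size
     * computed by pnv_xive_vc_size() is correct, then map the region
     * with CQ_VC_BAR <- base | CQ_VC_BAR_VALID.
     */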
930 | |
931 | /* |
932 | * XIVE Table settings. |
933 | */ |
934 | case CQ_TAR: /* Table Address */ |
935 | break; |
936 | case CQ_TDR: /* Table Data */ |
937 | pnv_xive_table_set_data(xive, val); |
938 | break; |
939 | |
940 | /* |
941 | * XIVE VC & PC Virtual Structure Table settings |
942 | */ |
943 | case VC_VSD_TABLE_ADDR: |
944 | case PC_VSD_TABLE_ADDR: /* Virtual table selector */ |
945 | break; |
946 | case VC_VSD_TABLE_DATA: /* Virtual table setting */ |
947 | case PC_VSD_TABLE_DATA: |
948 | pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA); |
949 | break; |
950 | |
951 | /* |
952 | * Interrupt fifo overflow in memory backing store (Not modeled) |
953 | */ |
954 | case VC_IRQ_CONFIG_IPI: |
955 | case VC_IRQ_CONFIG_HW: |
956 | case VC_IRQ_CONFIG_CASCADE1: |
957 | case VC_IRQ_CONFIG_CASCADE2: |
958 | case VC_IRQ_CONFIG_REDIST: |
959 | case VC_IRQ_CONFIG_IPI_CASC: |
960 | break; |
961 | |
962 | /* |
963 | * XIVE hardware thread enablement |
964 | */ |
965 | case PC_THREAD_EN_REG0: /* Physical Thread Enable */ |
966 | case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */ |
967 | break; |
968 | |
969 | case PC_THREAD_EN_REG0_SET: |
970 | xive->regs[PC_THREAD_EN_REG0 >> 3] |= val; |
971 | break; |
972 | case PC_THREAD_EN_REG1_SET: |
973 | xive->regs[PC_THREAD_EN_REG1 >> 3] |= val; |
974 | break; |
975 | case PC_THREAD_EN_REG0_CLR: |
976 | xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val; |
977 | break; |
978 | case PC_THREAD_EN_REG1_CLR: |
979 | xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val; |
980 | break; |
981 | |
982 | /* |
983 | * Indirect TIMA access set up. Defines the PIR of the HW thread |
984 | * to use. |
985 | */ |
986 | case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3: |
987 | break; |
988 | |
989 | /* |
990 | * XIVE PC & VC cache updates for EAS, NVT and END |
991 | */ |
992 | case VC_IVC_SCRUB_MASK: |
993 | case VC_IVC_SCRUB_TRIG: |
994 | break; |
995 | |
996 | case VC_EQC_CWATCH_SPEC: |
997 | val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */ |
998 | break; |
999 | case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: |
1000 | break; |
1001 | case VC_EQC_CWATCH_DAT0: |
1002 | /* writing to DATA0 triggers the cache write */ |
1003 | xive->regs[reg] = val; |
1004 | pnv_xive_end_update(xive); |
1005 | break; |
1006 | case VC_EQC_SCRUB_MASK: |
1007 | case VC_EQC_SCRUB_TRIG: |
1008 | /* |
1009 | * The scrubbing registers flush the cache in RAM and can also |
1010 | * invalidate. |
1011 | */ |
1012 | break; |
1013 | |
1014 | case PC_VPC_CWATCH_SPEC: |
1015 | val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */ |
1016 | break; |
1017 | case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: |
1018 | break; |
1019 | case PC_VPC_CWATCH_DAT0: |
1020 | /* writing to DATA0 triggers the cache write */ |
1021 | xive->regs[reg] = val; |
1022 | pnv_xive_nvt_update(xive); |
1023 | break; |
1024 | case PC_VPC_SCRUB_MASK: |
1025 | case PC_VPC_SCRUB_TRIG: |
1026 | /* |
1027 | * The scrubbing registers flush the cache in RAM and can also |
1028 | * invalidate. |
1029 | */ |
1030 | break; |
1031 | |
1032 | |
1033 | /* |
1034 | * XIVE PC & VC cache invalidation |
1035 | */ |
1036 | case PC_AT_KILL: |
1037 | break; |
1038 | case VC_AT_MACRO_KILL: |
1039 | break; |
1040 | case PC_AT_KILL_MASK: |
1041 | case VC_AT_MACRO_KILL_MASK: |
1042 | break; |
1043 | |
1044 | default: |
1045 | xive_error(xive, "IC: invalid write to reg=0x%" HWADDR_PRIx, offset); |
1046 | return; |
1047 | } |
1048 | |
1049 | xive->regs[reg] = val; |
1050 | } |
1051 | |
1052 | static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size) |
1053 | { |
1054 | PnvXive *xive = PNV_XIVE(opaque); |
1055 | uint64_t val = 0; |
1056 | uint32_t reg = offset >> 3; |
1057 | |
1058 | switch (offset) { |
1059 | case CQ_CFG_PB_GEN: |
1060 | case CQ_IC_BAR: |
1061 | case CQ_TM1_BAR: |
1062 | case CQ_TM2_BAR: |
1063 | case CQ_PC_BAR: |
1064 | case CQ_PC_BARM: |
1065 | case CQ_VC_BAR: |
1066 | case CQ_VC_BARM: |
1067 | case CQ_TAR: |
1068 | case CQ_TDR: |
1069 | case CQ_PBI_CTL: |
1070 | |
1071 | case PC_TCTXT_CFG: |
1072 | case PC_TCTXT_TRACK: |
1073 | case PC_TCTXT_INDIR0: |
1074 | case PC_TCTXT_INDIR1: |
1075 | case PC_TCTXT_INDIR2: |
1076 | case PC_TCTXT_INDIR3: |
1077 | case PC_GLOBAL_CONFIG: |
1078 | |
1079 | case PC_VPC_SCRUB_MASK: |
1080 | |
1081 | case VC_GLOBAL_CONFIG: |
1082 | case VC_AIB_TX_ORDER_TAG2: |
1083 | |
1084 | case VC_IRQ_CONFIG_IPI: |
1085 | case VC_IRQ_CONFIG_HW: |
1086 | case VC_IRQ_CONFIG_CASCADE1: |
1087 | case VC_IRQ_CONFIG_CASCADE2: |
1088 | case VC_IRQ_CONFIG_REDIST: |
1089 | case VC_IRQ_CONFIG_IPI_CASC: |
1090 | |
1091 | case VC_EQC_SCRUB_MASK: |
1092 | case VC_IVC_SCRUB_MASK: |
1093 | case VC_SBC_CONFIG: |
1094 | case VC_AT_MACRO_KILL_MASK: |
1095 | case VC_VSD_TABLE_ADDR: |
1096 | case PC_VSD_TABLE_ADDR: |
1097 | case VC_VSD_TABLE_DATA: |
1098 | case PC_VSD_TABLE_DATA: |
1099 | case PC_THREAD_EN_REG0: |
1100 | case PC_THREAD_EN_REG1: |
1101 | val = xive->regs[reg]; |
1102 | break; |
1103 | |
1104 | /* |
1105 | * XIVE hardware thread enablement |
1106 | */ |
1107 | case PC_THREAD_EN_REG0_SET: |
1108 | case PC_THREAD_EN_REG0_CLR: |
1109 | val = xive->regs[PC_THREAD_EN_REG0 >> 3]; |
1110 | break; |
1111 | case PC_THREAD_EN_REG1_SET: |
1112 | case PC_THREAD_EN_REG1_CLR: |
1113 | val = xive->regs[PC_THREAD_EN_REG1 >> 3]; |
1114 | break; |
1115 | |
1116 | case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */ |
1117 | val = 0xffffff0000000000; |
1118 | break; |
1119 | |
1120 | /* |
1121 | * XIVE PC & VC cache updates for EAS, NVT and END |
1122 | */ |
1123 | case VC_EQC_CWATCH_SPEC: |
        xive->regs[reg] &= ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
1125 | val = xive->regs[reg]; |
1126 | break; |
1127 | case VC_EQC_CWATCH_DAT0: |
1128 | /* |
1129 | * Load DATA registers from cache with data requested by the |
1130 | * SPEC register |
1131 | */ |
1132 | pnv_xive_end_cache_load(xive); |
1133 | val = xive->regs[reg]; |
1134 | break; |
1135 | case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3: |
1136 | val = xive->regs[reg]; |
1137 | break; |
1138 | |
1139 | case PC_VPC_CWATCH_SPEC: |
        xive->regs[reg] &= ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
1141 | val = xive->regs[reg]; |
1142 | break; |
1143 | case PC_VPC_CWATCH_DAT0: |
1144 | /* |
1145 | * Load DATA registers from cache with data requested by the |
1146 | * SPEC register |
1147 | */ |
1148 | pnv_xive_nvt_cache_load(xive); |
1149 | val = xive->regs[reg]; |
1150 | break; |
1151 | case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7: |
1152 | val = xive->regs[reg]; |
1153 | break; |
1154 | |
1155 | case PC_VPC_SCRUB_TRIG: |
1156 | case VC_IVC_SCRUB_TRIG: |
1157 | case VC_EQC_SCRUB_TRIG: |
1158 | xive->regs[reg] &= ~VC_SCRUB_VALID; |
1159 | val = xive->regs[reg]; |
1160 | break; |
1161 | |
1162 | /* |
1163 | * XIVE PC & VC cache invalidation |
1164 | */ |
1165 | case PC_AT_KILL: |
1166 | xive->regs[reg] &= ~PC_AT_KILL_VALID; |
1167 | val = xive->regs[reg]; |
1168 | break; |
1169 | case VC_AT_MACRO_KILL: |
1170 | xive->regs[reg] &= ~VC_KILL_VALID; |
1171 | val = xive->regs[reg]; |
1172 | break; |
1173 | |
1174 | /* |
1175 | * XIVE synchronisation |
1176 | */ |
1177 | case VC_EQC_CONFIG: |
1178 | val = VC_EQC_SYNC_MASK; |
1179 | break; |
1180 | |
1181 | default: |
1182 | xive_error(xive, "IC: invalid read reg=0x%" HWADDR_PRIx, offset); |
1183 | } |
1184 | |
1185 | return val; |
1186 | } |
1187 | |
1188 | static const MemoryRegionOps pnv_xive_ic_reg_ops = { |
1189 | .read = pnv_xive_ic_reg_read, |
1190 | .write = pnv_xive_ic_reg_write, |
1191 | .endianness = DEVICE_BIG_ENDIAN, |
1192 | .valid = { |
1193 | .min_access_size = 8, |
1194 | .max_access_size = 8, |
1195 | }, |
1196 | .impl = { |
1197 | .min_access_size = 8, |
1198 | .max_access_size = 8, |
1199 | }, |
1200 | }; |
1201 | |
1202 | /* |
1203 | * IC - Notify MMIO port page (write only) |
1204 | */ |
1205 | #define PNV_XIVE_FORWARD_IPI 0x800 /* Forward IPI */ |
1206 | #define PNV_XIVE_FORWARD_HW 0x880 /* Forward HW */ |
1207 | #define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */ |
1208 | #define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */ |
1209 | #define PNV_XIVE_FORWARD_REDIS 0xa00 /* Forward Redistribution */ |
1210 | #define PNV_XIVE_RESERVED5 0xa80 /* Cache line 5 PowerBUS operation */ |
1211 | #define PNV_XIVE_RESERVED6 0xb00 /* Cache line 6 PowerBUS operation */ |
1212 | #define PNV_XIVE_RESERVED7 0xb80 /* Cache line 7 PowerBUS operation */ |
1213 | |
1214 | /* VC synchronisation */ |
1215 | #define PNV_XIVE_SYNC_IPI 0xc00 /* Sync IPI */ |
1216 | #define PNV_XIVE_SYNC_HW 0xc80 /* Sync HW */ |
1217 | #define PNV_XIVE_SYNC_OS_ESC 0xd00 /* Sync OS escalation */ |
1218 | #define PNV_XIVE_SYNC_HW_ESC 0xd80 /* Sync Hyp escalation */ |
1219 | #define PNV_XIVE_SYNC_REDIS 0xe00 /* Sync Redistribution */ |
1220 | |
1221 | /* PC synchronisation */ |
1222 | #define PNV_XIVE_SYNC_PULL 0xe80 /* Sync pull context */ |
1223 | #define PNV_XIVE_SYNC_PUSH 0xf00 /* Sync push context */ |
1224 | #define PNV_XIVE_SYNC_VPC 0xf80 /* Sync remove VPC store */ |
1225 | |
1226 | static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val) |
1227 | { |
1228 | /* |
1229 | * Forward the source event notification directly to the Router. |
1230 | * The source interrupt number should already be correctly encoded |
1231 | * with the chip block id by the sending device (PHB, PSI). |
1232 | */ |
1233 | xive_router_notify(XIVE_NOTIFIER(xive), val); |
1234 | } |
1235 | |
1236 | static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val, |
1237 | unsigned size) |
1238 | { |
1239 | PnvXive *xive = PNV_XIVE(opaque); |
1240 | |
1241 | /* VC: HW triggers */ |
1242 | switch (addr) { |
1243 | case 0x000 ... 0x7FF: |
1244 | pnv_xive_ic_hw_trigger(opaque, addr, val); |
1245 | break; |
1246 | |
1247 | /* VC: Forwarded IRQs */ |
1248 | case PNV_XIVE_FORWARD_IPI: |
1249 | case PNV_XIVE_FORWARD_HW: |
1250 | case PNV_XIVE_FORWARD_OS_ESC: |
1251 | case PNV_XIVE_FORWARD_HW_ESC: |
1252 | case PNV_XIVE_FORWARD_REDIS: |
1253 | /* TODO: forwarded IRQs. Should be like HW triggers */ |
1254 | xive_error(xive, "IC: forwarded at @0x%" HWADDR_PRIx" IRQ 0x%" PRIx64, |
1255 | addr, val); |
1256 | break; |
1257 | |
1258 | /* VC syncs */ |
1259 | case PNV_XIVE_SYNC_IPI: |
1260 | case PNV_XIVE_SYNC_HW: |
1261 | case PNV_XIVE_SYNC_OS_ESC: |
1262 | case PNV_XIVE_SYNC_HW_ESC: |
1263 | case PNV_XIVE_SYNC_REDIS: |
1264 | break; |
1265 | |
1266 | /* PC syncs */ |
1267 | case PNV_XIVE_SYNC_PULL: |
1268 | case PNV_XIVE_SYNC_PUSH: |
1269 | case PNV_XIVE_SYNC_VPC: |
1270 | break; |
1271 | |
1272 | default: |
1273 | xive_error(xive, "IC: invalid notify write @%" HWADDR_PRIx, addr); |
1274 | } |
1275 | } |
1276 | |
1277 | static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr, |
1278 | unsigned size) |
1279 | { |
1280 | PnvXive *xive = PNV_XIVE(opaque); |
1281 | |
1282 | /* loads are invalid */ |
1283 | xive_error(xive, "IC: invalid notify read @%" HWADDR_PRIx, addr); |
1284 | return -1; |
1285 | } |
1286 | |
1287 | static const MemoryRegionOps pnv_xive_ic_notify_ops = { |
1288 | .read = pnv_xive_ic_notify_read, |
1289 | .write = pnv_xive_ic_notify_write, |
1290 | .endianness = DEVICE_BIG_ENDIAN, |
1291 | .valid = { |
1292 | .min_access_size = 8, |
1293 | .max_access_size = 8, |
1294 | }, |
1295 | .impl = { |
1296 | .min_access_size = 8, |
1297 | .max_access_size = 8, |
1298 | }, |
1299 | }; |
1300 | |
1301 | /* |
1302 | * IC - LSI MMIO handlers (not modeled) |
1303 | */ |
1304 | |
1305 | static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr, |
1306 | uint64_t val, unsigned size) |
1307 | { |
1308 | PnvXive *xive = PNV_XIVE(opaque); |
1309 | |
1310 | xive_error(xive, "IC: LSI invalid write @%" HWADDR_PRIx, addr); |
1311 | } |
1312 | |
1313 | static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size) |
1314 | { |
1315 | PnvXive *xive = PNV_XIVE(opaque); |
1316 | |
1317 | xive_error(xive, "IC: LSI invalid read @%" HWADDR_PRIx, addr); |
1318 | return -1; |
1319 | } |
1320 | |
1321 | static const MemoryRegionOps pnv_xive_ic_lsi_ops = { |
1322 | .read = pnv_xive_ic_lsi_read, |
1323 | .write = pnv_xive_ic_lsi_write, |
1324 | .endianness = DEVICE_BIG_ENDIAN, |
1325 | .valid = { |
1326 | .min_access_size = 8, |
1327 | .max_access_size = 8, |
1328 | }, |
1329 | .impl = { |
1330 | .min_access_size = 8, |
1331 | .max_access_size = 8, |
1332 | }, |
1333 | }; |
1334 | |
1335 | /* |
1336 | * IC - Indirect TIMA MMIO handlers |
1337 | */ |
1338 | |
1339 | /* |
 * When the TIMA is accessed from the indirect page, the thread id
 * (PIR) has to be configured in the IC registers beforehand. This is
 * used for resets and also for debug purposes.
1343 | */ |
1344 | static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive) |
1345 | { |
1346 | uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3]; |
1347 | PowerPCCPU *cpu = NULL; |
1348 | int pir; |
1349 | |
1350 | if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) { |
1351 | xive_error(xive, "IC: no indirect TIMA access in progress" ); |
1352 | return NULL; |
1353 | } |
1354 | |
1355 | pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff; |
1356 | cpu = ppc_get_vcpu_by_pir(pir); |
1357 | if (!cpu) { |
1358 | xive_error(xive, "IC: invalid PIR %x for indirect access" , pir); |
1359 | return NULL; |
1360 | } |
1361 | |
1362 | /* Check that HW thread is XIVE enabled */ |
1363 | if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { |
1364 | xive_error(xive, "IC: CPU %x is not enabled" , pir); |
1365 | } |
1366 | |
1367 | return XIVE_TCTX(pnv_cpu_state(cpu)->intc); |
1368 | } |
1369 | |
1370 | static void xive_tm_indirect_write(void *opaque, hwaddr offset, |
1371 | uint64_t value, unsigned size) |
1372 | { |
1373 | XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); |
1374 | |
1375 | xive_tctx_tm_write(tctx, offset, value, size); |
1376 | } |
1377 | |
1378 | static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset, |
1379 | unsigned size) |
1380 | { |
1381 | XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque)); |
1382 | |
1383 | return xive_tctx_tm_read(tctx, offset, size); |
1384 | } |
1385 | |
1386 | static const MemoryRegionOps xive_tm_indirect_ops = { |
1387 | .read = xive_tm_indirect_read, |
1388 | .write = xive_tm_indirect_write, |
1389 | .endianness = DEVICE_BIG_ENDIAN, |
1390 | .valid = { |
1391 | .min_access_size = 1, |
1392 | .max_access_size = 8, |
1393 | }, |
1394 | .impl = { |
1395 | .min_access_size = 1, |
1396 | .max_access_size = 8, |
1397 | }, |
1398 | }; |
1399 | |
1400 | /* |
1401 | * Interrupt controller XSCOM region. |
1402 | */ |
1403 | static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size) |
1404 | { |
1405 | switch (addr >> 3) { |
1406 | case X_VC_EQC_CONFIG: |
1407 | /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */ |
1408 | return VC_EQC_SYNC_MASK; |
1409 | default: |
1410 | return pnv_xive_ic_reg_read(opaque, addr, size); |
1411 | } |
1412 | } |
1413 | |
1414 | static void pnv_xive_xscom_write(void *opaque, hwaddr addr, |
1415 | uint64_t val, unsigned size) |
1416 | { |
1417 | pnv_xive_ic_reg_write(opaque, addr, val, size); |
1418 | } |
1419 | |
1420 | static const MemoryRegionOps pnv_xive_xscom_ops = { |
1421 | .read = pnv_xive_xscom_read, |
1422 | .write = pnv_xive_xscom_write, |
1423 | .endianness = DEVICE_BIG_ENDIAN, |
1424 | .valid = { |
1425 | .min_access_size = 8, |
1426 | .max_access_size = 8, |
1427 | }, |
1428 | .impl = { |
1429 | .min_access_size = 8, |
1430 | .max_access_size = 8, |
1431 | } |
1432 | }; |
1433 | |
1434 | /* |
1435 | * Virtualization Controller MMIO region containing the IPI and END ESB pages |
1436 | */ |
1437 | static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset, |
1438 | unsigned size) |
1439 | { |
1440 | PnvXive *xive = PNV_XIVE(opaque); |
1441 | uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); |
1442 | uint64_t edt_type = 0; |
1443 | uint64_t edt_offset; |
1444 | MemTxResult result; |
1445 | AddressSpace *edt_as = NULL; |
1446 | uint64_t ret = -1; |
1447 | |
1448 | if (edt_index < XIVE_TABLE_EDT_MAX) { |
1449 | edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); |
1450 | } |
1451 | |
1452 | switch (edt_type) { |
1453 | case CQ_TDR_EDT_IPI: |
1454 | edt_as = &xive->ipi_as; |
1455 | break; |
1456 | case CQ_TDR_EDT_EQ: |
1457 | edt_as = &xive->end_as; |
1458 | break; |
1459 | default: |
1460 | xive_error(xive, "VC: invalid EDT type for read @%" HWADDR_PRIx, offset); |
1461 | return -1; |
1462 | } |
1463 | |
1464 | /* Remap the offset for the targeted address space */ |
1465 | edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); |
1466 | |
1467 | ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED, |
1468 | &result); |
1469 | |
1470 | if (result != MEMTX_OK) { |
1471 | xive_error(xive, "VC: %s read failed at @0x%" HWADDR_PRIx " -> @0x%" |
1472 | HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END" , |
1473 | offset, edt_offset); |
1474 | return -1; |
1475 | } |
1476 | |
1477 | return ret; |
1478 | } |
1479 | |
1480 | static void pnv_xive_vc_write(void *opaque, hwaddr offset, |
1481 | uint64_t val, unsigned size) |
1482 | { |
1483 | PnvXive *xive = PNV_XIVE(opaque); |
1484 | uint64_t edt_index = offset >> pnv_xive_edt_shift(xive); |
1485 | uint64_t edt_type = 0; |
1486 | uint64_t edt_offset; |
1487 | MemTxResult result; |
1488 | AddressSpace *edt_as = NULL; |
1489 | |
1490 | if (edt_index < XIVE_TABLE_EDT_MAX) { |
1491 | edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]); |
1492 | } |
1493 | |
1494 | switch (edt_type) { |
1495 | case CQ_TDR_EDT_IPI: |
1496 | edt_as = &xive->ipi_as; |
1497 | break; |
1498 | case CQ_TDR_EDT_EQ: |
1499 | edt_as = &xive->end_as; |
1500 | break; |
1501 | default: |
1502 | xive_error(xive, "VC: invalid EDT type for write @%" HWADDR_PRIx, |
1503 | offset); |
1504 | return; |
1505 | } |
1506 | |
1507 | /* Remap the offset for the targeted address space */ |
1508 | edt_offset = pnv_xive_edt_offset(xive, offset, edt_type); |
1509 | |
1510 | address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result); |
1511 | if (result != MEMTX_OK) { |
1512 | xive_error(xive, "VC: write failed at @0x%" HWADDR_PRIx, edt_offset); |
1513 | } |
1514 | } |
1515 | |
1516 | static const MemoryRegionOps pnv_xive_vc_ops = { |
1517 | .read = pnv_xive_vc_read, |
1518 | .write = pnv_xive_vc_write, |
1519 | .endianness = DEVICE_BIG_ENDIAN, |
1520 | .valid = { |
1521 | .min_access_size = 8, |
1522 | .max_access_size = 8, |
1523 | }, |
1524 | .impl = { |
1525 | .min_access_size = 8, |
1526 | .max_access_size = 8, |
1527 | }, |
1528 | }; |
1529 | |
1530 | /* |
1531 | * Presenter Controller MMIO region. The Virtualization Controller |
1532 | * updates the IPB in the NVT table when required. Not modeled. |
1533 | */ |
1534 | static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr, |
1535 | unsigned size) |
1536 | { |
1537 | PnvXive *xive = PNV_XIVE(opaque); |
1538 | |
1539 | xive_error(xive, "PC: invalid read @%" HWADDR_PRIx, addr); |
1540 | return -1; |
1541 | } |
1542 | |
1543 | static void pnv_xive_pc_write(void *opaque, hwaddr addr, |
1544 | uint64_t value, unsigned size) |
1545 | { |
1546 | PnvXive *xive = PNV_XIVE(opaque); |
1547 | |
1548 | xive_error(xive, "PC: invalid write to VC @%" HWADDR_PRIx, addr); |
1549 | } |
1550 | |
1551 | static const MemoryRegionOps pnv_xive_pc_ops = { |
1552 | .read = pnv_xive_pc_read, |
1553 | .write = pnv_xive_pc_write, |
1554 | .endianness = DEVICE_BIG_ENDIAN, |
1555 | .valid = { |
1556 | .min_access_size = 8, |
1557 | .max_access_size = 8, |
1558 | }, |
1559 | .impl = { |
1560 | .min_access_size = 8, |
1561 | .max_access_size = 8, |
1562 | }, |
1563 | }; |
1564 | |
1565 | void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon) |
1566 | { |
1567 | XiveRouter *xrtr = XIVE_ROUTER(xive); |
1568 | uint8_t blk = xive->chip->chip_id; |
1569 | uint32_t srcno0 = XIVE_SRCNO(blk, 0); |
1570 | uint32_t nr_ipis = pnv_xive_nr_ipis(xive); |
1571 | uint32_t nr_ends = pnv_xive_nr_ends(xive); |
1572 | XiveEAS eas; |
1573 | XiveEND end; |
1574 | int i; |
1575 | |
1576 | monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n" , blk, srcno0, |
1577 | srcno0 + nr_ipis - 1); |
1578 | xive_source_pic_print_info(&xive->ipi_source, srcno0, mon); |
1579 | |
1580 | monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n" , blk, srcno0, |
1581 | srcno0 + nr_ipis - 1); |
1582 | for (i = 0; i < nr_ipis; i++) { |
1583 | if (xive_router_get_eas(xrtr, blk, i, &eas)) { |
1584 | break; |
1585 | } |
1586 | if (!xive_eas_is_masked(&eas)) { |
1587 | xive_eas_pic_print_info(&eas, i, mon); |
1588 | } |
1589 | } |
1590 | |
1591 | monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n" , blk, 0, nr_ends - 1); |
1592 | for (i = 0; i < nr_ends; i++) { |
1593 | if (xive_router_get_end(xrtr, blk, i, &end)) { |
1594 | break; |
1595 | } |
1596 | xive_end_pic_print_info(&end, i, mon); |
1597 | } |
1598 | |
1599 | monitor_printf(mon, "XIVE[%x] END Escalation %08x .. %08x\n" , blk, 0, |
1600 | nr_ends - 1); |
1601 | for (i = 0; i < nr_ends; i++) { |
1602 | if (xive_router_get_end(xrtr, blk, i, &end)) { |
1603 | break; |
1604 | } |
1605 | xive_end_eas_pic_print_info(&end, i, mon); |
1606 | } |
1607 | } |
1608 | |
1609 | static void pnv_xive_reset(void *dev) |
1610 | { |
1611 | PnvXive *xive = PNV_XIVE(dev); |
1612 | XiveSource *xsrc = &xive->ipi_source; |
1613 | XiveENDSource *end_xsrc = &xive->end_source; |
1614 | |
1615 | /* |
1616 | * Use the PnvChip id to identify the XIVE interrupt controller. |
     * It can be overridden by configuration at runtime.
1618 | */ |
1619 | xive->tctx_chipid = xive->chip->chip_id; |
1620 | |
    /* Default page size (should be changed to 64K at runtime) */
1622 | xive->ic_shift = xive->vc_shift = xive->pc_shift = 12; |
1623 | |
1624 | /* Clear subregions */ |
1625 | if (memory_region_is_mapped(&xsrc->esb_mmio)) { |
1626 | memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio); |
1627 | } |
1628 | |
1629 | if (memory_region_is_mapped(&xive->ipi_edt_mmio)) { |
1630 | memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio); |
1631 | } |
1632 | |
1633 | if (memory_region_is_mapped(&end_xsrc->esb_mmio)) { |
1634 | memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio); |
1635 | } |
1636 | |
1637 | if (memory_region_is_mapped(&xive->end_edt_mmio)) { |
1638 | memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio); |
1639 | } |
1640 | } |
1641 | |
1642 | static void pnv_xive_init(Object *obj) |
1643 | { |
1644 | PnvXive *xive = PNV_XIVE(obj); |
1645 | |
1646 | object_initialize_child(obj, "ipi_source" , &xive->ipi_source, |
1647 | sizeof(xive->ipi_source), TYPE_XIVE_SOURCE, |
1648 | &error_abort, NULL); |
1649 | object_initialize_child(obj, "end_source" , &xive->end_source, |
1650 | sizeof(xive->end_source), TYPE_XIVE_END_SOURCE, |
1651 | &error_abort, NULL); |
1652 | } |
1653 | |
1654 | /* |
1655 | * Maximum number of IRQs and ENDs supported by HW |
1656 | */ |
1657 | #define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) |
1658 | #define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE)) |
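
/*
 * Sizing example (assuming a 64G VC region): each interrupt consumes
 * two 64K ESB pages (XIVE_ESB_64K_2PAGE, i.e. 128K), so the model
 * provisions 64G / 128K = 512K IRQs and as many ENDs.
 */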
1659 | |
1660 | static void pnv_xive_realize(DeviceState *dev, Error **errp) |
1661 | { |
1662 | PnvXive *xive = PNV_XIVE(dev); |
1663 | XiveSource *xsrc = &xive->ipi_source; |
1664 | XiveENDSource *end_xsrc = &xive->end_source; |
1665 | Error *local_err = NULL; |
1666 | Object *obj; |
1667 | |
    obj = object_property_get_link(OBJECT(dev), "chip", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'chip' not found: ");
1672 | return; |
1673 | } |
1674 | |
1675 | /* The PnvChip id identifies the XIVE interrupt controller. */ |
1676 | xive->chip = PNV_CHIP(obj); |
1677 | |
1678 | /* |
1679 | * The XiveSource and XiveENDSource objects are realized with the |
1680 | * maximum allowed HW configuration. The ESB MMIO regions will be |
1681 | * resized dynamically when the controller is configured by the FW |
1682 | * to limit accesses to resources not provisioned. |
1683 | */ |
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
1699 | if (local_err) { |
1700 | error_propagate(errp, local_err); |
1701 | return; |
1702 | } |
1703 | |
    /* Default page size. Generally changed at runtime to 64K */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);
1710 | |
1711 | /* Interrupt controller MMIO regions */ |
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);
1729 | /* |
1730 | * Overall Virtualization Controller MMIO region containing the |
1731 | * IPI ESB pages and END ESB pages. The layout is defined by the |
1732 | * EDT "Domain table" and the accesses are dispatched using |
1733 | * address spaces for each. |
1734 | */ |
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);
1759 | |
1760 | qemu_register_reset(pnv_xive_reset, dev); |
1761 | } |
1762 | |
1763 | static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt, |
1764 | int xscom_offset) |
1765 | { |
    const char compat[] = "ibm,power9-xive-x";
1767 | char *name; |
1768 | int offset; |
1769 | uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE; |
1770 | uint32_t reg[] = { |
1771 | cpu_to_be32(lpc_pcba), |
1772 | cpu_to_be32(PNV9_XSCOM_XIVE_SIZE) |
1773 | }; |
1774 | |
    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
1783 | return 0; |
1784 | } |
1785 | |
1786 | static Property pnv_xive_properties[] = { |
1787 | DEFINE_PROP_UINT64("ic-bar" , PnvXive, ic_base, 0), |
1788 | DEFINE_PROP_UINT64("vc-bar" , PnvXive, vc_base, 0), |
1789 | DEFINE_PROP_UINT64("pc-bar" , PnvXive, pc_base, 0), |
1790 | DEFINE_PROP_UINT64("tm-bar" , PnvXive, tm_base, 0), |
1791 | DEFINE_PROP_END_OF_LIST(), |
1792 | }; |
1793 | |
1794 | static void pnv_xive_class_init(ObjectClass *klass, void *data) |
1795 | { |
1796 | DeviceClass *dc = DEVICE_CLASS(klass); |
1797 | PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); |
1798 | XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass); |
1799 | XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass); |
1800 | |
1801 | xdc->dt_xscom = pnv_xive_dt_xscom; |
1802 | |
    dc->desc = "PowerNV XIVE Interrupt Controller";
1804 | dc->realize = pnv_xive_realize; |
1805 | dc->props = pnv_xive_properties; |
1806 | |
1807 | xrc->get_eas = pnv_xive_get_eas; |
1808 | xrc->get_end = pnv_xive_get_end; |
1809 | xrc->write_end = pnv_xive_write_end; |
1810 | xrc->get_nvt = pnv_xive_get_nvt; |
1811 | xrc->write_nvt = pnv_xive_write_nvt; |
1812 | xrc->get_tctx = pnv_xive_get_tctx; |
1813 | |
1814 | xnc->notify = pnv_xive_notify; |
1815 | }; |
1816 | |
1817 | static const TypeInfo pnv_xive_info = { |
1818 | .name = TYPE_PNV_XIVE, |
1819 | .parent = TYPE_XIVE_ROUTER, |
1820 | .instance_init = pnv_xive_init, |
1821 | .instance_size = sizeof(PnvXive), |
1822 | .class_init = pnv_xive_class_init, |
1823 | .interfaces = (InterfaceInfo[]) { |
1824 | { TYPE_PNV_XSCOM_INTERFACE }, |
1825 | { } |
1826 | } |
1827 | }; |
1828 | |
1829 | static void pnv_xive_register_types(void) |
1830 | { |
1831 | type_register_static(&pnv_xive_info); |
1832 | } |
1833 | |
1834 | type_init(pnv_xive_register_types) |
1835 | |