1/*
2 * QEMU PowerPC sPAPR IRQ interface
3 *
4 * Copyright (c) 2018, IBM Corporation.
5 *
6 * This code is licensed under the GPL version 2 or later. See the
7 * COPYING file in the top-level directory.
8 */
9
10#include "qemu/osdep.h"
11#include "qemu/log.h"
12#include "qemu/error-report.h"
13#include "qapi/error.h"
14#include "hw/irq.h"
15#include "hw/ppc/spapr.h"
16#include "hw/ppc/spapr_cpu_core.h"
17#include "hw/ppc/spapr_xive.h"
18#include "hw/ppc/xics.h"
19#include "hw/ppc/xics_spapr.h"
20#include "hw/qdev-properties.h"
21#include "cpu-models.h"
22#include "sysemu/kvm.h"
23
24#include "trace.h"
25
/*
 * Initialize the bitmap allocator tracking the MSI IRQ number space.
 * 'nr_msis' is the number of MSIs the machine supports; the bitmap
 * starts all-clear, i.e. every MSI IRQ is initially free.
 */
void spapr_irq_msi_init(SpaprMachineState *spapr, uint32_t nr_msis)
{
    spapr->irq_map_nr = nr_msis;
    spapr->irq_map = bitmap_new(spapr->irq_map_nr);
}
31
32int spapr_irq_msi_alloc(SpaprMachineState *spapr, uint32_t num, bool align,
33 Error **errp)
34{
35 int irq;
36
37 /*
38 * The 'align_mask' parameter of bitmap_find_next_zero_area()
39 * should be one less than a power of 2; 0 means no
40 * alignment. Adapt the 'align' value of the former allocator
41 * to fit the requirements of bitmap_find_next_zero_area()
42 */
43 align -= 1;
44
45 irq = bitmap_find_next_zero_area(spapr->irq_map, spapr->irq_map_nr, 0, num,
46 align);
47 if (irq == spapr->irq_map_nr) {
48 error_setg(errp, "can't find a free %d-IRQ block", num);
49 return -1;
50 }
51
52 bitmap_set(spapr->irq_map, irq, num);
53
54 return irq + SPAPR_IRQ_MSI;
55}
56
/*
 * Return a block of 'num' MSI IRQs to the allocator. 'irq' is the
 * global IRQ number previously returned by spapr_irq_msi_alloc(), so
 * the SPAPR_IRQ_MSI offset is subtracted to recover the bitmap index.
 */
void spapr_irq_msi_free(SpaprMachineState *spapr, int irq, uint32_t num)
{
    bitmap_clear(spapr->irq_map, irq - SPAPR_IRQ_MSI, num);
}
61
/*
 * Try to connect the in-kernel (KVM) variant of the given IRQ backend.
 *
 * A failure is fatal only when kernel-irqchip=on was explicitly
 * requested; with kernel-irqchip=auto the error is downgraded to a
 * warning and the machine falls back to the emulated backend.
 */
static void spapr_irq_init_kvm(SpaprMachineState *spapr,
                               SpaprIrq *irq, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) {
        irq->init_kvm(spapr, &local_err);
        /* kernel-irqchip=on: any KVM failure is a hard error */
        if (local_err && machine_kernel_irqchip_required(machine)) {
            error_prepend(&local_err,
                          "kernel_irqchip requested but unavailable: ");
            error_propagate(errp, local_err);
            return;
        }

        if (!local_err) {
            return;
        }

        /*
         * We failed to initialize the KVM device, fallback to
         * emulated mode
         */
        error_prepend(&local_err, "kernel_irqchip allowed but unavailable: ");
        error_append_hint(&local_err, "Falling back to kernel-irqchip=off\n");
        warn_report_err(local_err);
    }
}
90
91/*
92 * XICS IRQ backend.
93 */
94
/*
 * Create and realize the emulated XICS interrupt source controller
 * (ICS) with 'nr_irqs' sources, then register the XICS hcalls/RTAS
 * calls with the machine. The ICS object is parented to the machine
 * so QOM owns its lifetime.
 */
static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    Object *obj;
    Error *local_err = NULL;

    obj = object_new(TYPE_ICS_SIMPLE);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    /* Back-link to the machine acting as the XICS fabric */
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_fatal);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &error_fatal);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr->ics = ICS_BASE(obj);

    /* Register the XICS hypercalls and RTAS calls */
    xics_spapr_init(spapr);
}
116
/* A source is free when no IRQ type flag (LSI/MSI) has been set yet */
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

/*
 * Claim XICS source number 'irq' and configure it as LSI or MSI.
 * Fails if the number is outside the ICS range or already claimed.
 * Returns 0 on success, -1 (with *errp set) on error.
 */
static int spapr_irq_claim_xics(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    ICSState *ics = spapr->ics;

    assert(ics);

    if (!ics_valid_irq(ics, irq)) {
        error_setg(errp, "IRQ %d is invalid", irq);
        return -1;
    }

    if (!ICS_IRQ_FREE(ics, irq - ics->offset)) {
        error_setg(errp, "IRQ %d is not free", irq);
        return -1;
    }

    ics_set_irq_type(ics, irq - ics->offset, lsi);
    return 0;
}
140
/*
 * Release the XICS sources [irq, irq + num). Clearing the flags via
 * memset() marks each source free again (see ICS_IRQ_FREE). Freeing
 * an already-free source is only traced, not treated as an error.
 */
static void spapr_irq_free_xics(SpaprMachineState *spapr, int irq, int num)
{
    ICSState *ics = spapr->ics;
    uint32_t srcno = irq - ics->offset;
    int i;

    if (ics_valid_irq(ics, irq)) {
        trace_spapr_irq_free(0, irq, num);
        for (i = srcno; i < srcno + num; ++i) {
            if (ICS_IRQ_FREE(ics, i)) {
                trace_spapr_irq_free_warn(0, i);
            }
            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
        }
    }
}
157
158static qemu_irq spapr_qirq_xics(SpaprMachineState *spapr, int irq)
159{
160 ICSState *ics = spapr->ics;
161 uint32_t srcno = irq - ics->offset;
162
163 if (ics_valid_irq(ics, irq)) {
164 return spapr->qirqs[srcno];
165 }
166
167 return NULL;
168}
169
170static void spapr_irq_print_info_xics(SpaprMachineState *spapr, Monitor *mon)
171{
172 CPUState *cs;
173
174 CPU_FOREACH(cs) {
175 PowerPCCPU *cpu = POWERPC_CPU(cs);
176
177 icp_pic_print_info(spapr_cpu_state(cpu)->icp, mon);
178 }
179
180 ics_pic_print_info(spapr->ics, mon);
181}
182
/*
 * Create the per-vCPU XICS interrupt presenter (ICP) and attach it to
 * the vCPU state. Called for each CPU at machine init and on hotplug.
 */
static void spapr_irq_cpu_intc_create_xics(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = icp_create(OBJECT(cpu), TYPE_ICP, XICS_FABRIC(spapr),
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->icp = ICP(obj);
}
199
200static int spapr_irq_post_load_xics(SpaprMachineState *spapr, int version_id)
201{
202 if (!kvm_irqchip_in_kernel()) {
203 CPUState *cs;
204 CPU_FOREACH(cs) {
205 PowerPCCPU *cpu = POWERPC_CPU(cs);
206 icp_resend(spapr_cpu_state(cpu)->icp);
207 }
208 }
209 return 0;
210}
211
/*
 * qemu_irq handler for the XICS backend: forward a line level change
 * for source 'srcno' to the emulated ICS.
 */
static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    ics_simple_set_irq(spapr->ics, srcno, val);
}
218
219static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp)
220{
221 Error *local_err = NULL;
222
223 spapr_irq_init_kvm(spapr, &spapr_irq_xics, &local_err);
224 if (local_err) {
225 error_propagate(errp, local_err);
226 return;
227 }
228}
229
/* Device tree node name of the XICS interrupt controller */
static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr)
{
    return XICS_NODENAME;
}
234
/* Connect the in-kernel XICS device (called from spapr_irq_init_kvm) */
static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        xics_kvm_connect(spapr, errp);
    }
}
241
/*
 * XICS exposes a 4K IRQ number space starting at XICS_IRQ_BASE.
 * MSIs are allocated from SPAPR_IRQ_MSI up to the end of the range.
 */
#define SPAPR_IRQ_XICS_NR_IRQS 0x1000
#define SPAPR_IRQ_XICS_NR_MSIS \
    (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI)

/* XICS-only backend descriptor (ic-mode=xics) */
SpaprIrq spapr_irq_xics = {
    .nr_irqs = SPAPR_IRQ_XICS_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XICS_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_LEGACY,

    .init = spapr_irq_init_xics,
    .claim = spapr_irq_claim_xics,
    .free = spapr_irq_free_xics,
    .qirq = spapr_qirq_xics,
    .print_info = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
    .set_irq = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
    .init_kvm = spapr_irq_init_kvm_xics,
};
264
265/*
266 * XIVE IRQ backend.
267 */
/*
 * Create and realize the sPAPR XIVE device with 'nr_irqs' sources,
 * claim the per-vCPU IPI sources, and register the XIVE hcalls.
 */
static void spapr_irq_init_xive(SpaprMachineState *spapr, int nr_irqs,
                                Error **errp)
{
    uint32_t nr_servers = spapr_max_server_number(spapr);
    DeviceState *dev;
    int i;

    dev = qdev_create(NULL, TYPE_SPAPR_XIVE);
    qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs);
    /*
     * 8 XIVE END structures per CPU. One for each available priority
     */
    qdev_prop_set_uint32(dev, "nr-ends", nr_servers << 3);
    qdev_init_nofail(dev);

    spapr->xive = SPAPR_XIVE(dev);

    /* Enable the CPU IPIs */
    for (i = 0; i < nr_servers; ++i) {
        spapr_xive_irq_claim(spapr->xive, SPAPR_IRQ_IPI + i, false);
    }

    spapr_xive_hcall_init(spapr);
}
292
293static int spapr_irq_claim_xive(SpaprMachineState *spapr, int irq, bool lsi,
294 Error **errp)
295{
296 if (!spapr_xive_irq_claim(spapr->xive, irq, lsi)) {
297 error_setg(errp, "IRQ %d is invalid", irq);
298 return -1;
299 }
300 return 0;
301}
302
303static void spapr_irq_free_xive(SpaprMachineState *spapr, int irq, int num)
304{
305 int i;
306
307 for (i = irq; i < irq + num; ++i) {
308 spapr_xive_irq_free(spapr->xive, i);
309 }
310}
311
/*
 * Map a global IRQ number to its qemu_irq input line, or NULL when
 * out of range. Asserts the EAS is valid, i.e. the source was claimed
 * beforehand.
 */
static qemu_irq spapr_qirq_xive(SpaprMachineState *spapr, int irq)
{
    SpaprXive *xive = spapr->xive;

    if (irq >= xive->nr_irqs) {
        return NULL;
    }

    /* The sPAPR machine/device should have claimed the IRQ before */
    assert(xive_eas_is_valid(&xive->eat[irq]));

    return spapr->qirqs[irq];
}
325
326static void spapr_irq_print_info_xive(SpaprMachineState *spapr,
327 Monitor *mon)
328{
329 CPUState *cs;
330
331 CPU_FOREACH(cs) {
332 PowerPCCPU *cpu = POWERPC_CPU(cs);
333
334 xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, mon);
335 }
336
337 spapr_xive_pic_print_info(spapr->xive, mon);
338}
339
/*
 * Create the per-vCPU XIVE thread interrupt context (TCTX) and attach
 * it to the vCPU state. Called at machine init and on CPU hotplug.
 */
static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    obj = xive_tctx_create(OBJECT(cpu), XIVE_ROUTER(spapr->xive), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_cpu->tctx = XIVE_TCTX(obj);

    /*
     * (TCG) Early setting the OS CAM line for hotplugged CPUs as they
     * don't beneficiate from the reset of the XIVE IRQ backend
     */
    spapr_xive_set_tctx_os_cam(spapr_cpu->tctx);
}
361
/* Delegate post-migration fixup to the XIVE device model */
static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id)
{
    return spapr_xive_post_load(spapr->xive, version_id);
}
366
/*
 * Machine reset hook for the XIVE backend: program each vCPU OS CAM
 * line, (re-)connect the in-kernel XIVE device if configured, and
 * enable the XIVE MMIO regions.
 */
static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp)
{
    CPUState *cs;
    Error *local_err = NULL;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        /* (TCG) Set the OS CAM line of the thread interrupt context. */
        spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx);
    }

    spapr_irq_init_kvm(spapr, &spapr_irq_xive, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Activate the XIVE MMIOs */
    spapr_xive_mmio_set_enabled(spapr->xive, true);
}
388
/*
 * qemu_irq handler for the XIVE backend: forward a line level change
 * for source 'srcno' either to the in-kernel device or to the
 * emulated source block.
 */
static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_source_set_irq(&spapr->xive->source, srcno, val);
    } else {
        xive_source_set_irq(&spapr->xive->source, srcno, val);
    }
}
399
/* Device tree node name of the XIVE interrupt controller */
static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr)
{
    return spapr->xive->nodename;
}
404
/* Connect the in-kernel XIVE device (called from spapr_irq_init_kvm) */
static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp)
{
    if (kvm_enabled()) {
        kvmppc_xive_connect(spapr->xive, errp);
    }
}
411
/*
 * XIVE uses the full IRQ number space. Set it to 8K to be compatible
 * with XICS.
 */

#define SPAPR_IRQ_XIVE_NR_IRQS     0x2000
#define SPAPR_IRQ_XIVE_NR_MSIS     (SPAPR_IRQ_XIVE_NR_IRQS - SPAPR_IRQ_MSI)

/* XIVE-only backend descriptor (ic-mode=xive) */
SpaprIrq spapr_irq_xive = {
    .nr_irqs = SPAPR_IRQ_XIVE_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XIVE_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_EXPLOIT,

    .init = spapr_irq_init_xive,
    .claim = spapr_irq_claim_xive,
    .free = spapr_irq_free_xive,
    .qirq = spapr_qirq_xive,
    .print_info = spapr_irq_print_info_xive,
    .dt_populate = spapr_dt_xive,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xive,
    .post_load = spapr_irq_post_load_xive,
    .reset = spapr_irq_reset_xive,
    .set_irq = spapr_irq_set_irq_xive,
    .get_nodename = spapr_irq_get_nodename_xive,
    .init_kvm = spapr_irq_init_kvm_xive,
};
438
439/*
440 * Dual XIVE and XICS IRQ backend.
441 *
442 * Both interrupt mode, XIVE and XICS, objects are created but the
443 * machine starts in legacy interrupt mode (XICS). It can be changed
444 * by the CAS negotiation process and, in that case, the new mode is
445 * activated after an extra machine reset.
446 */
447
448/*
449 * Returns the sPAPR IRQ backend negotiated by CAS. XICS is the
450 * default.
451 */
452static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr)
453{
454 return spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT) ?
455 &spapr_irq_xive : &spapr_irq_xics;
456}
457
458static void spapr_irq_init_dual(SpaprMachineState *spapr, int nr_irqs,
459 Error **errp)
460{
461 Error *local_err = NULL;
462
463 spapr_irq_xics.init(spapr, spapr_irq_xics.nr_irqs, &local_err);
464 if (local_err) {
465 error_propagate(errp, local_err);
466 return;
467 }
468
469 spapr_irq_xive.init(spapr, spapr_irq_xive.nr_irqs, &local_err);
470 if (local_err) {
471 error_propagate(errp, local_err);
472 return;
473 }
474}
475
/*
 * Dual-mode claim: an IRQ must be claimed in both backends since the
 * active one is only known after CAS negotiation. A failure in the
 * XICS claim aborts before touching XIVE.
 */
static int spapr_irq_claim_dual(SpaprMachineState *spapr, int irq, bool lsi,
                                Error **errp)
{
    Error *local_err = NULL;
    int ret;

    ret = spapr_irq_xics.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    ret = spapr_irq_xive.claim(spapr, irq, lsi, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return ret;
    }

    return ret;
}
496
/* Dual-mode free: release the IRQ range in both backends */
static void spapr_irq_free_dual(SpaprMachineState *spapr, int irq, int num)
{
    spapr_irq_xics.free(spapr, irq, num);
    spapr_irq_xive.free(spapr, irq, num);
}
502
/* Dispatch to the CAS-negotiated backend */
static qemu_irq spapr_qirq_dual(SpaprMachineState *spapr, int irq)
{
    return spapr_irq_current(spapr)->qirq(spapr, irq);
}
507
/* Dispatch to the CAS-negotiated backend */
static void spapr_irq_print_info_dual(SpaprMachineState *spapr, Monitor *mon)
{
    spapr_irq_current(spapr)->print_info(spapr, mon);
}
512
/* Populate the device tree with the CAS-negotiated controller only */
static void spapr_irq_dt_populate_dual(SpaprMachineState *spapr,
                                       uint32_t nr_servers, void *fdt,
                                       uint32_t phandle)
{
    spapr_irq_current(spapr)->dt_populate(spapr, nr_servers, fdt, phandle);
}
519
/*
 * Dual-mode presenter creation: each vCPU needs both a XIVE TCTX and
 * a XICS ICP, since the active mode is only known after CAS.
 */
static void spapr_irq_cpu_intc_create_dual(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    spapr_irq_xive.cpu_intc_create(spapr, cpu, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    spapr_irq_xics.cpu_intc_create(spapr, cpu, errp);
}
533
/*
 * Dual-mode post-migration fixup: if the guest had negotiated XIVE,
 * tear down the default KVM XICS device and reset the XIVE backend
 * before delegating to the active backend's post_load hook.
 */
static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id)
{
    /*
     * Force a reset of the XIVE backend after migration. The machine
     * defaults to XICS at startup.
     */
    if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        if (kvm_irqchip_in_kernel()) {
            xics_kvm_disconnect(spapr, &error_fatal);
        }
        spapr_irq_xive.reset(spapr, &error_fatal);
    }

    return spapr_irq_current(spapr)->post_load(spapr, version_id);
}
549
550static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp)
551{
552 Error *local_err = NULL;
553
554 /*
555 * Deactivate the XIVE MMIOs. The XIVE backend will reenable them
556 * if selected.
557 */
558 spapr_xive_mmio_set_enabled(spapr->xive, false);
559
560 /* Destroy all KVM devices */
561 if (kvm_irqchip_in_kernel()) {
562 xics_kvm_disconnect(spapr, &local_err);
563 if (local_err) {
564 error_propagate(errp, local_err);
565 error_prepend(errp, "KVM XICS disconnect failed: ");
566 return;
567 }
568 kvmppc_xive_disconnect(spapr->xive, &local_err);
569 if (local_err) {
570 error_propagate(errp, local_err);
571 error_prepend(errp, "KVM XIVE disconnect failed: ");
572 return;
573 }
574 }
575
576 spapr_irq_current(spapr)->reset(spapr, errp);
577}
578
/* qemu_irq handler: forward to the CAS-negotiated backend */
static void spapr_irq_set_irq_dual(void *opaque, int srcno, int val)
{
    SpaprMachineState *spapr = opaque;

    spapr_irq_current(spapr)->set_irq(spapr, srcno, val);
}
585
/* Device tree node name of the CAS-negotiated controller */
static const char *spapr_irq_get_nodename_dual(SpaprMachineState *spapr)
{
    return spapr_irq_current(spapr)->get_nodename(spapr);
}
590
591/*
592 * Define values in sync with the XIVE and XICS backend
593 */
#define SPAPR_IRQ_DUAL_NR_IRQS 0x2000
#define SPAPR_IRQ_DUAL_NR_MSIS (SPAPR_IRQ_DUAL_NR_IRQS - SPAPR_IRQ_MSI)

/* Dual backend descriptor (ic-mode=dual): XICS at startup, CAS may
 * switch to XIVE after a machine reset. */
SpaprIrq spapr_irq_dual = {
    .nr_irqs = SPAPR_IRQ_DUAL_NR_IRQS,
    .nr_msis = SPAPR_IRQ_DUAL_NR_MSIS,
    .ov5 = SPAPR_OV5_XIVE_BOTH,

    .init = spapr_irq_init_dual,
    .claim = spapr_irq_claim_dual,
    .free = spapr_irq_free_dual,
    .qirq = spapr_qirq_dual,
    .print_info = spapr_irq_print_info_dual,
    .dt_populate = spapr_irq_dt_populate_dual,
    .cpu_intc_create = spapr_irq_cpu_intc_create_dual,
    .post_load = spapr_irq_post_load_dual,
    .reset = spapr_irq_reset_dual,
    .set_irq = spapr_irq_set_irq_dual,
    .get_nodename = spapr_irq_get_nodename_dual,
    .init_kvm = NULL, /* should not be used */
};
615
616
/*
 * Validate the machine IRQ configuration against the CPU model and
 * the KVM capabilities, adjusting ic-mode=dual to XICS on pre-POWER9
 * CPUs. Sets *errp for configurations that cannot work.
 */
static void spapr_irq_check(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);

    /*
     * Sanity checks on non-P9 machines. On these, XIVE is not
     * advertised, see spapr_dt_ov5_platform_support()
     */
    if (!ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00,
                               0, spapr->max_compat_pvr)) {
        /*
         * If the 'dual' interrupt mode is selected, force XICS as CAS
         * negotiation is useless.
         */
        if (spapr->irq == &spapr_irq_dual) {
            spapr->irq = &spapr_irq_xics;
            return;
        }

        /*
         * Non-P9 machines using only XIVE is a bogus setup. We have two
         * scenarios to take into account because of the compat mode:
         *
         * 1. POWER7/8 machines should fail to init later on when creating
         *    the XIVE interrupt presenters because a POWER9 exception
         *    model is required.
         *
         * 2. POWER9 machines using the POWER8 compat mode won't fail and
         *    will let the OS boot with a partial XIVE setup : DT
         *    properties but no hcalls.
         *
         * To cover both and not confuse the OS, add an early failure in
         * QEMU.
         */
        if (spapr->irq == &spapr_irq_xive) {
            error_setg(errp, "XIVE-only machines require a POWER9 CPU");
            return;
        }
    }

    /*
     * On a POWER9 host, some older KVM XICS devices cannot be destroyed and
     * re-created. Detect that early to avoid QEMU to exit later when the
     * guest reboots.
     */
    if (kvm_enabled() &&
        spapr->irq == &spapr_irq_dual &&
        machine_kernel_irqchip_required(machine) &&
        xics_kvm_has_broken_disconnect(spapr)) {
        error_setg(errp, "KVM is too old to support ic-mode=dual,kernel-irqchip=on");
        return;
    }
}
670
671/*
672 * sPAPR IRQ frontend routines for devices
673 */
/*
 * Machine-level IRQ subsystem initialization: validate the
 * configuration, set up the MSI allocator, create the backend
 * device(s) and allocate the qemu_irq array for the whole IRQ number
 * space.
 */
void spapr_irq_init(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    Error *local_err = NULL;

    if (machine_kernel_irqchip_split(machine)) {
        error_setg(errp, "kernel_irqchip split mode not supported on pseries");
        return;
    }

    if (!kvm_enabled() && machine_kernel_irqchip_required(machine)) {
        error_setg(errp,
                   "kernel_irqchip requested but only available with KVM");
        return;
    }

    spapr_irq_check(spapr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Initialize the MSI IRQ allocator. */
    if (!SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        spapr_irq_msi_init(spapr, spapr->irq->nr_msis);
    }

    spapr->irq->init(spapr, spapr->irq->nr_irqs, errp);

    /* One qemu_irq input line per possible source number */
    spapr->qirqs = qemu_allocate_irqs(spapr->irq->set_irq, spapr,
                                      spapr->irq->nr_irqs);
}
706
/* Claim an IRQ with the active backend; 0 on success, -1 on error */
int spapr_irq_claim(SpaprMachineState *spapr, int irq, bool lsi, Error **errp)
{
    return spapr->irq->claim(spapr, irq, lsi, errp);
}
711
/* Release the IRQ range [irq, irq + num) with the active backend */
void spapr_irq_free(SpaprMachineState *spapr, int irq, int num)
{
    spapr->irq->free(spapr, irq, num);
}
716
/* Return the qemu_irq line for 'irq', or NULL if out of range */
qemu_irq spapr_qirq(SpaprMachineState *spapr, int irq)
{
    return spapr->irq->qirq(spapr, irq);
}
721
/* Run the active backend's post-migration fixup */
int spapr_irq_post_load(SpaprMachineState *spapr, int version_id)
{
    return spapr->irq->post_load(spapr, version_id);
}
726
/*
 * Machine reset hook: delegates to the backend's reset handler when
 * it has one. All MSIs must have been released by their devices at
 * this point, hence the empty-bitmap assertion.
 */
void spapr_irq_reset(SpaprMachineState *spapr, Error **errp)
{
    assert(!spapr->irq_map || bitmap_empty(spapr->irq_map, spapr->irq_map_nr));

    if (spapr->irq->reset) {
        spapr->irq->reset(spapr, errp);
    }
}
735
/*
 * Look up the phandle of the active interrupt controller node in the
 * flattened device tree. Returns the phandle, or -1 (with *errp set)
 * when the node is missing or has no phandle.
 */
int spapr_irq_get_phandle(SpaprMachineState *spapr, void *fdt, Error **errp)
{
    const char *nodename = spapr->irq->get_nodename(spapr);
    int offset, phandle;

    offset = fdt_subnode_offset(fdt, 0, nodename);
    if (offset < 0) {
        error_setg(errp, "Can't find node \"%s\": %s", nodename,
                   fdt_strerror(offset));
        return -1;
    }

    phandle = fdt_get_phandle(fdt, offset);
    if (!phandle) {
        error_setg(errp, "Can't get phandle of node \"%s\"", nodename);
        return -1;
    }

    return phandle;
}
756
757/*
758 * XICS legacy routines - to deprecate one day
759 */
760
761static int ics_find_free_block(ICSState *ics, int num, int alignnum)
762{
763 int first, i;
764
765 for (first = 0; first < ics->nr_irqs; first += alignnum) {
766 if (num > (ics->nr_irqs - first)) {
767 return -1;
768 }
769 for (i = first; i < first + num; ++i) {
770 if (!ICS_IRQ_FREE(ics, i)) {
771 break;
772 }
773 }
774 if (i == (first + num)) {
775 return first;
776 }
777 }
778
779 return -1;
780}
781
/*
 * Legacy allocator: find a free block of 'num' source numbers in the
 * XICS ICS. Returns the first global IRQ number of the block, or -1
 * (with *errp set) when no block is available.
 */
int spapr_irq_find(SpaprMachineState *spapr, int num, bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int first = -1;

    assert(ics);

    /*
     * MSIMesage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }

    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    /* Convert the ICS source number to a global IRQ number */
    return first + ics->offset;
}
811
#define SPAPR_IRQ_XICS_LEGACY_NR_IRQS 0x400

/* Legacy XICS backend for older machine types: smaller IRQ space and
 * dynamic IRQ number allocation (see spapr_irq_find) */
SpaprIrq spapr_irq_xics_legacy = {
    .nr_irqs = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .nr_msis = SPAPR_IRQ_XICS_LEGACY_NR_IRQS,
    .ov5 = SPAPR_OV5_XIVE_LEGACY,

    .init = spapr_irq_init_xics,
    .claim = spapr_irq_claim_xics,
    .free = spapr_irq_free_xics,
    .qirq = spapr_qirq_xics,
    .print_info = spapr_irq_print_info_xics,
    .dt_populate = spapr_dt_xics,
    .cpu_intc_create = spapr_irq_cpu_intc_create_xics,
    .post_load = spapr_irq_post_load_xics,
    .reset = spapr_irq_reset_xics,
    .set_irq = spapr_irq_set_irq_xics,
    .get_nodename = spapr_irq_get_nodename_xics,
    .init_kvm = spapr_irq_init_kvm_xics,
};
832