// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
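
/*
 * Ask OPAL for the ESB characteristics of @hw_irq, translate the OPAL
 * flags into XIVE_IRQ_FLAG_* bits and map the EOI (and, when distinct,
 * trigger) ESB pages.
 */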
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
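
/*
 * Route @hw_irq to @target at @prio, retrying while OPAL reports the
 * call busy. @sw_irq is the logical IRQ number associated with the
 * source, which the HW reports in the target event queue.
 */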
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				      u32 *sw_irq)
{
	s64 rc;
	__be64 vp;
	__be32 lirq;

	rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

	*target = be64_to_cpu(vp);
	*sw_irq = be32_to_cpu(lirq);

	return rc == 0 ? 0 : -ENXIO;
}

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		return -EIO;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		return -EIO;
	}

	/*
	 * KVM code requires all of the above to be visible before
	 * q->qpage is set due to how it manages IPI EOIs
	 */
	wmb();
	q->qpage = qpage;
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);
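
/*
 * Allocate a queue page for @prio and configure the corresponding
 * per-CPU event queue in the HW.
 */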
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
	s64 irq = opal_xive_allocate_irq_raw(chip_id);

	/*
	 * Old versions of skiboot can incorrectly return 0xffffffff to
	 * indicate no space, fix it up here.
	 */
	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}

	return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(chip_id);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);

		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = XIVE_BAD_IRQ;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}
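
/*
 * Enable this CPU's pool VP and push it into the HV pool context of
 * the TIMA. The pool VPs are set up for the benefit of KVM (see
 * xive_native_setup_pools() below).
 */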
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
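
/*
 * Have the HW synchronize in-flight triggers for @hw_irq, either at
 * the source (EAS) level or at the level of the queue it targets.
 */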
void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);
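
/* Backend operations used by the XIVE core on PowerNV */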
static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.get_irq_config		= xive_native_get_irq_config,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.eoi			= xive_native_eoi,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "native",
};
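
/*
 * Parse the optional VP provisioning properties: when present, OPAL
 * may require the kernel to donate pages (one per listed chip) before
 * it can allocate further VP blocks, so set up a kmem cache to back
 * those donations.
 */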
static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
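
/* Physical address of the OS TIMA window, also used by KVM */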
unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}
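
/*
 * Donate one page per provisioning chip to OPAL. Called when
 * opal_xive_alloc_vp_block() returns OPAL_XIVE_PROVISIONING.
 */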
static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		kmemleak_ignore(p);
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
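
/*
 * Enable a VP in HW, optionally in single-escalation mode where a
 * single escalation interrupt covers all of the VP's queues.
 */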
int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
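
/*
 * The queue and VP state accessors below are used by KVM to save and
 * restore the interrupt controller state, e.g. for migration.
 */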
int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be32 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be32_to_cpu(qsize);
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
	return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
		opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
		       vp_id, rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);

machine_arch_initcall(powernv, xive_core_debug_init);