// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>

#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
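
/*
 * Backend-wide state: the provisioning page size, chip list and kmem
 * cache come from the device tree (see xive_parse_provisioning() below)
 * and are used to feed OPAL when it asks for more VP memory;
 * xive_queue_shift is the chosen event queue (EQ) page order, and
 * xive_pool_vps is the base of the VP block backing the per-CPU HV
 * pool contexts.
 */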

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
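
/*
 * Most OPAL XIVE calls below follow the same convention: retry the
 * call with a small delay for as long as firmware returns OPAL_BUSY,
 * then map any remaining OPAL error onto a generic errno.
 */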

int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
				      u32 *sw_irq)
{
	s64 rc;
	__be64 vp;
	__be32 lirq;

	rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);

	*target = be64_to_cpu(vp);
	*sw_irq = be32_to_cpu(lirq);

	return rc == 0 ? 0 : -ENXIO;
}
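
/*
 * Event queue geometry: "order" is log2 of the queue size in bytes and
 * each entry is a 32-bit word, so a queue of a given order holds
 * 1 << (order - 2) entries. q->msk is the resulting index mask, and
 * q->toggle tracks the generation bit that flips each time the queue
 * index wraps around.
 */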

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed ? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
	s64 irq = opal_xive_allocate_irq_raw(chip_id);

	/*
	 * Old versions of skiboot can incorrectly return 0xffffffff to
	 * indicate no space, fix it up here.
	 */
	return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}
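
/*
 * IPI allocation (SMP only): each CPU's IPI is allocated from its own
 * chip (xc->chip_id), which keeps the interrupt source local to the
 * node the CPU sits on.
 */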

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 irq;

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(xc->chip_id);
		if (irq == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq(void)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (!xc->hw_ipi)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(OPAL_BUSY_DELAY_MS);
			continue;
		}
		xc->hw_ipi = 0;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = (ack >> 8) >> 6;
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}

static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}
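
/*
 * The HV "pool" context: for each CPU brought up, a pool VP is enabled
 * in OPAL and its CAM value pushed into word 2 of the TIMA pool ring
 * (TM_QW2_HV_POOL). These pool VPs are what KVM relies on to dispatch
 * guest VPs on the thread later (see xive_native_setup_pools()).
 */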

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Check if pool VP already active, if it is, pull it */
	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);
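
/*
 * The backend operations handed to the XIVE core via xive_core_init().
 * The core calls these for everything that differs between running
 * natively on OPAL and running under a hypervisor.
 */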

static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.get_irq_config		= xive_native_get_irq_config,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.eoi			= xive_native_eoi,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "native",
};
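
/*
 * VP provisioning: on systems that advertise
 * "ibm,xive-provision-page-size", OPAL may run out of backing store
 * when asked for a new VP block and answer OPAL_XIVE_PROVISIONING;
 * the kernel must then donate pages of that size (one per listed
 * chip) before retrying. The helper below parses the device tree
 * properties and sets up a kmem cache to allocate those pages from.
 */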

static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kcalloc(4, xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Do we support single escalation */
	if (of_get_property(np, "single-escalation-support", NULL) != NULL)
		xive_has_single_esc = true;

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Resource 2 is OS window */
	if (of_address_to_resource(np, 2, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}

	xive_tima_os = r.start;

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}
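
/*
 * Called when opal_xive_alloc_vp_block() returns OPAL_XIVE_PROVISIONING:
 * donate one freshly allocated page to each chip listed in the device
 * tree, after which the caller retries the allocation.
 */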

static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node
		 * where the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}
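
/*
 * VP blocks come in power-of-two sizes, so max_vcpus is rounded up to
 * the next power of two: e.g. max_vcpus = 48 gives fls(48) - 1 = 5,
 * and since 48 > (1 << 5) the order is bumped to 6, i.e. a block of
 * 64 VPs.
 */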

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(OPAL_BUSY_DELAY_MS);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
	s64 rc;
	u64 flags = OPAL_XIVE_VP_ENABLED;

	if (single_escalation)
		flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, flags, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(OPAL_BUSY_DELAY_MS);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
	return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
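
/*
 * The queue and VP state accessors below exist for the benefit of KVM,
 * which uses them to save and restore interrupt controller state when
 * migrating guests; they are only usable when OPAL implements the
 * matching tokens (see xive_native_has_queue_state_support()).
 */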

int xive_native_get_queue_info(u32 vp_id, u32 prio,
			       u64 *out_qpage,
			       u64 *out_qsize,
			       u64 *out_qeoi_page,
			       u32 *out_escalate_irq,
			       u64 *out_qflags)
{
	__be64 qpage;
	__be32 qsize;
	__be64 qeoi_page;
	__be32 escalate_irq;
	__be64 qflags;
	s64 rc;

	rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
				      &qeoi_page, &escalate_irq, &qflags);
	if (rc) {
		pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (out_qpage)
		*out_qpage = be64_to_cpu(qpage);
	if (out_qsize)
		*out_qsize = be32_to_cpu(qsize);
	if (out_qeoi_page)
		*out_qeoi_page = be64_to_cpu(qeoi_page);
	if (out_escalate_irq)
		*out_escalate_irq = be32_to_cpu(escalate_irq);
	if (out_qflags)
		*out_qflags = be64_to_cpu(qflags);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
	__be32 opal_qtoggle;
	__be32 opal_qindex;
	s64 rc;

	rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
				       &opal_qindex);
	if (rc) {
		pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	if (qtoggle)
		*qtoggle = be32_to_cpu(opal_qtoggle);
	if (qindex)
		*qindex = be32_to_cpu(opal_qindex);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
	s64 rc;

	rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
	if (rc) {
		pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
		       vp_id, prio, rc);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
	return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
		opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
	__be64 state;
	s64 rc;

	rc = opal_xive_get_vp_state(vp_id, &state);
	if (rc) {
		pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
		       vp_id, rc);
		return -EIO;
	}

	if (out_state)
		*out_state = be64_to_cpu(state);
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);