/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"

#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert a priority number to an Interrupt Pending Buffer (IPB)
 * register, which indicates a pending interrupt at the priority
 * corresponding to the bit number
 */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification.
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}

static void ipb_update(uint8_t *regs, uint8_t priority)
{
    regs[TM_IPB] |= priority_to_ipb(priority);
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
}

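/*
 * Worked example (assuming XIVE_PRIORITY_MAX is 7): priority 0 maps
 * to IPB bit 7 (0x80) and priority 7 to bit 0 (0x01). With
 * IPB = 0x84 (priorities 0 and 5 pending), ipb_to_pipr() returns
 * clz32(0x84000000) = 0, i.e. the most favored pending priority.
 */
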
static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0; /* Not supported */
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}

static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;
    }

    return (nsr << 8) | regs[TM_CPPR];
}

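/*
 * The 2-byte ACK value returned above packs the NSR in the high byte
 * and the CPPR in the low byte, which is what an ACK load such as
 * TM_SPC_ACK_OS_REG is expected to return.
 */
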
static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}

static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    return *((uint32_t *) &ring[TM_WORD2]);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */

static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
                                      unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw registers values
 *
 * Registers accessibility bits :
 *
 *    0 - no access
 *    1 - write only
 *    2 - read only
 *    3 - read/write
 */

static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2, 2, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* QW-3 PHYS */
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}

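/*
 * A worked example based on the OS view table above: a 4-byte access
 * at the start of the OS ring covers NSR/CPPR/IPB/LSMFB, which have
 * the view values 2, 3, 2, 2. A load therefore gets the mask
 * 0xffffffff while a store gets 0x00ff0000, i.e. only the CPPR byte
 * of the OS ring is writable from the OS page.
 */
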
static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4 or 8 bytes stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));

        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4 or 8 bytes loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* Filter out reserved values */
    return ret & mask;
}

/*
 * The TM context is mapped twice within each page. Stores and loads
 * to the first mapping below 2K write and read the specified values
 * without modification. The second mapping above 2K performs specific
 * state changes (side effects) in addition to setting/returning the
 * interrupt management area context of the processor thread.
 */

static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
    xive_tctx_notify(tctx, TM_QW1_OS);
}

static uint64_t xive_tm_pull_os_ctx(XiveTCTX *tctx, hwaddr offset,
                                    unsigned size)
{
    uint32_t qw1w2_prev = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2;

    qw1w2 = xive_set_field32(TM_QW1W2_VO, qw1w2_prev, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
    return qw1w2;
}

/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
typedef struct XiveTmOp {
    uint8_t  page_offset;
    uint32_t op_offset;
    unsigned size;
    void     (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size);
    uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive_tm_pull_pool_ctx },
};

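/*
 * An example of the lookup done by xive_tm_find_op() below: a 2-byte
 * load at TM_SPC_ACK_OS_REG issued from the OS page matches the
 * xive_tm_ack_os_reg entry above, and the same load issued from the
 * more privileged HV page matches it too, since operations of less
 * privileged pages remain available to more privileged ones.
 */
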
static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }

    return NULL;
}

/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                        unsigned size)
{
    const XiveTmOp *xto;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the region above 2K
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    const XiveTmOp *xto;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the region above 2K
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Finish with raw access to the register values
     */
    return xive_tm_raw_read(tctx, offset, size);
}

static void xive_tm_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    return xive_tctx_tm_read(tctx, offset, size);
}

const MemoryRegionOps xive_tm_ops = {
    .read = xive_tm_read,
    .write = xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR], be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index;
    int i;

    /* Skip partially initialized vCPUs. This can happen on sPAPR when vCPUs
     * are hot plugged or unplugged.
     */
    if (!tctx->cs) {
        return;
    }

    cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);

        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

void xive_tctx_reset(XiveTCTX *tctx)
{
    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set. With an empty IPB, ipb_to_pipr() returns
     * 0xFF.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'cpu' not found: ");
        return;
    }

    cpu = POWERPC_CPU(obj);
    tctx->cs = CPU(obj);

    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
        tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_connect(tctx, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static int vmstate_xive_tctx_pre_save(void *opaque)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        /*
         * Required for hotplugged CPU, for which the state comes
         * after all states of the machine.
         */
        kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->vmsd = &vmstate_xive_tctx;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up
     * by xive_tctx_create().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_tctx_info = {
    .name = TYPE_XIVE_TCTX,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
    object_unref(obj);
    object_property_add_const_link(obj, "cpu", cpu, &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return obj;

error:
    object_unparent(obj);
    error_propagate(errp, local_err);
    return NULL;
}

void xive_tctx_destroy(XiveTCTX *tctx)
{
    Object *obj = OBJECT(tctx);

    object_unref(object_property_get_link(obj, "cpu", &error_abort));
    object_unparent(obj);
}

/*
 * XIVE ESB helpers
 */

static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

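/*
 * Summary of the PQ transitions implemented by the two helpers above
 * (P is bit 1, Q is bit 0; "fwd" means the helper returns true and
 * the event is forwarded):
 *
 *          trigger                  EOI
 *   00 -> 10  fwd            00 -> 00  no fwd
 *   01 -> 01  no fwd         01 -> 01  no fwd
 *   10 -> 11  no fwd         10 -> 00  no fwd
 *   11 -> 11  no fwd         11 -> 10  fwd
 */
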
/*
 * XIVE Interrupt Source (or IVSE)
 */

uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

/*
 * Forward the source event notification to the Router
 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno);
    }
}

/*
 * In a two-page ESB MMIO setting, the even page is the trigger page
 * and the odd page is for management
 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}

/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1              EOI and return 0|1
 * 0x400 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */

static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two-page ESB MMIO setting, the trigger page should not be read */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    return ret;
}

/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */

static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    /* In a two-page ESB MMIO setting, the trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

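/*
 * A minimal usage sketch (an illustration, not code from this file):
 * board code typically binds the handler above with
 * qemu_allocate_irqs() and drives it through the generic qemu_irq
 * API:
 *
 *   qemu_irq *irqs = qemu_allocate_irqs(xive_source_set_irq, xsrc,
 *                                       xsrc->nr_irqs);
 *   qemu_set_irq(irqs[srcno], 1);    trigger an MSI or assert an LSI
 *   qemu_set_irq(irqs[srcno], 0);    deassert an LSI
 */
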
void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

    /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xive = XIVE_NOTIFIER(obj);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    if (!kvm_irqchip_in_kernel()) {
        memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                              &xive_source_esb_ops, xsrc, "xive.esb",
                              (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
    }

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Source";
    dc->props = xive_source_properties;
    dc->realize = xive_source_realize;
    dc->vmsd = &vmstate_xive_source;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_source_info = {
    .name = TYPE_XIVE_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init = xive_source_class_init,
};

/*
 * XiveEND helpers
 */

void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * Print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
    uint32_t nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESn, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c%c%c%c%c%c prio:%d nvt:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_end_is_valid(end) ? 'v' : '-',
                   xive_end_is_enqueue(end) ? 'q' : '-',
                   xive_end_is_notify(end) ? 'n' : '-',
                   xive_end_is_backlog(end) ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   xive_end_is_uncond_escalation(end) ? 'u' : '-',
                   xive_end_is_silent_escalation(end) ? 's' : '-',
                   priority, nvt_blk, nvt_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}

static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}

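/*
 * A short worked example of the queue geometry used above: with
 * qsize = 0, the EQ holds 1 << 10 = 1024 four-byte entries. Each
 * entry carries 31 bits of data tagged with the generation bit, and
 * when qindex wraps from 1023 back to 0 the generation toggles,
 * which lets the consumer tell new entries from stale ones.
 */
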
void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx,
                                 Monitor *mon)
{
    XiveEAS *eas = (XiveEAS *) &end->w4;
    uint8_t pq;

    if (!xive_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END_W1_ESe, end->w1);

    monitor_printf(mon, "  %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive_eas_is_valid(eas) ? 'V' : ' ',
                   xive_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_tctx(xrtr, cs);
}

/*
 * Encode the HW CAM line in the block group mode format :
 *
 *   chip << 19 | 0000000 0 0001 thread (7Bit)
 */
static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;

    return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f));
}

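/*
 * A worked example of the encoding above, assuming
 * xive_nvt_cam_line(blk, idx) packs "blk << 19 | idx": for PIR
 * 0x0428, the chip is (0x0428 >> 8) & 0xf = 4 and the thread bits
 * are 1 << 7 | (0x0428 & 0x7f) = 0xa8, giving the CAM line
 * 4 << 19 | 0xa8.
 */
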
/*
 * The thread context register words are in big-endian format.
 */
static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
                                     uint8_t nvt_blk, uint32_t nvt_idx,
                                     bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }

    return -1;
}

typedef struct XiveTCTXMatch {
    XiveTCTX *tctx;
    uint8_t ring;
} XiveTCTXMatch;

static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
                                 uint8_t nvt_blk, uint32_t nvt_idx,
                                 bool cam_ignore, uint8_t priority,
                                 uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;

    /*
     * TODO (PowerNV): handle chip_id overwrite of block field for
     * hardwired CAM compares
     */

    CPU_FOREACH(cs) {
        XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
        int ring;

        /*
         * Skip partially initialized vCPUs. This can happen when
         * vCPUs are hotplugged.
         */
        if (!tctx) {
            continue;
        }

        /*
         * HW checks that the CPU is enabled in the Physical Thread
         * Enable Register (PTER).
         */

        /*
         * Check the thread context CAM lines and record matches. We
         * will handle CPU exception delivery later
         */
        ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the context and follow on to catch duplicates, which we
         * don't support yet.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return false;
            }

            match->ring = ring;
            match->tctx = tctx;
        }
    }

    if (!match->tctx) {
        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
        return false;
    }

    return true;
}

/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
 * of a single NVT notification, the process is abbreviated and the
 * thread is signaled if a match is found. In case of a logical server
 * notification (bits ignored at the end of the NVT identifier), the
 * IVPE and IVRE select a winning thread using different filters. This
 * involves 2 or 3 exchanges on the PowerBus that the model does not
 * support.
 *
 * The parameters represent what is sent on the PowerBus
 */
static bool xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    bool found;

    found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                 priority, logic_serv, &match);
    if (found) {
        ipb_update(&match.tctx->regs[match.ring], priority);
        xive_tctx_notify(match.tctx, match.ring);
    }

    return found;
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provides further coalescing in the
 * Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    XiveNVT nvt;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    bool found;

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVT notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(xrtr, format, nvt_blk, nvt_idx,
                                  xive_get_field32(END_W7_F0_IGNORE, end.w7),
                                  priority,
                                  xive_get_field32(END_W7_F1_LOG_SERVER_ID,
                                                   end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /* Record the IPB in the associated NVT structure */
        ipb_update((uint8_t *) &nvt.w4, priority);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
                           xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
                           xive_get_field32(END_W5_ESC_END_DATA, end.w5));
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    /*
     * The IVRE checks the State Bit Cache at this point. We skip the
     * SBC lookup because the state bits of the sources are modeled
     * internally in QEMU.
     */

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA, eas.w));
}

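/*
 * Summary of the event routing path implemented by this file:
 *
 *   source PQ bits (xive_source_notify)
 *     -> EAS lookup (xive_router_notify)
 *       -> END lookup and enqueue (xive_router_end_notify)
 *         -> NVT lookup and CAM match (xive_presenter_notify)
 *           -> thread context IPB/PIPR update and CPU signaling
 *              (xive_tctx_notify)
 */
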
static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc = "XIVE Router Engine";
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name = TYPE_XIVE_ROUTER,
    .parent = TYPE_SYS_BUS_DEVICE,
    .abstract = true,
    .class_size = sizeof(XiveRouterClass),
    .class_init = xive_router_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, "  %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t) xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    end_blk = xsrc->block_id;
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xrtr = XIVE_ROUTER(obj);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    dc->props = xive_end_source_properties;
    dc->realize = xive_end_source_realize;
    /*
     * Reason: part of XIVE interrupt controller, needs to be wired up,
     * e.g. by spapr_xive_instance_init().
     */
    dc->user_creatable = false;
}

static const TypeInfo xive_end_source_info = {
    .name = TYPE_XIVE_END_SOURCE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)