/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "sysemu/reset.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "monitor/monitor.h"
#include "hw/irq.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive_regs.h"

/*
 * XIVE Thread Interrupt Management context
 */

/*
 * Convert a priority number to an Interrupt Pending Buffer (IPB)
 * register, which indicates a pending interrupt at the priority
 * corresponding to the bit number
 */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

/*
 * Convert an Interrupt Pending Buffer (IPB) register to a Pending
 * Interrupt Priority Register (PIPR), which contains the priority of
 * the most favored pending notification.
 */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? clz32((uint32_t)ipb << 24) : 0xff;
}

static void ipb_update(uint8_t *regs, uint8_t priority)
{
    regs[TM_IPB] |= priority_to_ipb(priority);
    regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);
}
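
/*
 * Illustrative values for the two helpers above, assuming an
 * XIVE_PRIORITY_MAX of 7: priority 0 maps to IPB bit 7 (0x80) and
 * priority 7 to bit 0 (0x01). Conversely, an IPB of 0x28 (priorities
 * 2 and 4 pending) gives clz32(0x28 << 24) = 2, i.e. a PIPR of 2,
 * the most favored pending priority. An ipb_update(regs, 5) on an
 * empty IPB would leave IPB at 0x04 and PIPR at 5.
 */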

static uint8_t exception_mask(uint8_t ring)
{
    switch (ring) {
    case TM_QW1_OS:
        return TM_QW1_NSR_EO;
    case TM_QW3_HV_PHYS:
        return TM_QW3_NSR_HE;
    default:
        g_assert_not_reached();
    }
}

static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
    switch (ring) {
    case TM_QW0_USER:
        return 0; /* Not supported */
    case TM_QW1_OS:
        return tctx->os_output;
    case TM_QW2_HV_POOL:
    case TM_QW3_HV_PHYS:
        return tctx->hv_output;
    default:
        return 0;
    }
}

static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];
    uint8_t nsr = regs[TM_NSR];
    uint8_t mask = exception_mask(ring);

    qemu_irq_lower(xive_tctx_output(tctx, ring));

    if (regs[TM_NSR] & mask) {
        uint8_t cppr = regs[TM_PIPR];

        regs[TM_CPPR] = cppr;

        /* Reset the pending buffer bit */
        regs[TM_IPB] &= ~priority_to_ipb(cppr);
        regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]);

        /* Drop Exception bit */
        regs[TM_NSR] &= ~mask;
    }

    return (nsr << 8) | regs[TM_CPPR];
}
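
/*
 * An illustrative ACK cycle, assuming the OS ring EO bit is the top
 * NSR bit: with NSR 0x80 and a pending PIPR of 4, the load above
 * would return 0x8004 (pre-acknowledge NSR in the upper byte, new
 * CPPR in the lower one), raise the CPPR to 4, clear the priority 4
 * bit from the IPB and drop the exception bit.
 */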

static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
{
    uint8_t *regs = &tctx->regs[ring];

    if (regs[TM_PIPR] < regs[TM_CPPR]) {
        switch (ring) {
        case TM_QW1_OS:
            regs[TM_NSR] |= TM_QW1_NSR_EO;
            break;
        case TM_QW3_HV_PHYS:
            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
            break;
        default:
            g_assert_not_reached();
        }
        qemu_irq_raise(xive_tctx_output(tctx, ring));
    }
}

static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
    if (cppr > XIVE_PRIORITY_MAX) {
        cppr = 0xff;
    }

    tctx->regs[ring + TM_CPPR] = cppr;

    /* CPPR has changed, check if we need to raise a pending exception */
    xive_tctx_notify(tctx, ring);
}

static inline uint32_t xive_tctx_word2(uint8_t *ring)
{
    return *((uint32_t *) &ring[TM_WORD2]);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA)
 */

static void xive_tm_set_hv_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff);
}

static uint64_t xive_tm_ack_hv_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}

static uint64_t xive_tm_pull_pool_ctx(XiveTCTX *tctx, hwaddr offset,
                                      unsigned size)
{
    uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw2w2;

    qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
    memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
    return qw2w2;
}

static void xive_tm_vt_push(XiveTCTX *tctx, hwaddr offset,
                            uint64_t value, unsigned size)
{
    tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = value & 0xff;
}

static uint64_t xive_tm_vt_poll(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] & 0xff;
}

/*
 * Define an access map for each page of the TIMA that we will use in
 * the memory region ops to filter values when doing loads and stores
 * of raw register values
 *
 * Register accessibility bits:
 *
 * 0x0 - no access
 * 0x1 - write only
 * 0x2 - read only
 * 0x3 - read/write
 */

static const uint8_t xive_tm_hw_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   3, 3, 3, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_hv_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    3, 3, 3, 3,   3, 3, 0, 2,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 3, 3,   0, 0, 0, 0,   0, 3, 3, 3,   0, 0, 0, 0, /* QW-2 POOL */
    3, 3, 3, 3,   0, 3, 0, 2,   3, 0, 0, 3,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_os_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   3, 3, 3, 3,   0, 0, 0, 0, /* QW-0 User */
    2, 3, 2, 2,   2, 2, 0, 2,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

static const uint8_t xive_tm_user_view[] = {
    3, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-0 User */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-1 OS   */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-2 POOL */
    0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0,   0, 0, 0, 0, /* QW-3 PHYS */
};

/*
 * Overall TIMA access map for the thread interrupt management context
 * registers
 */
static const uint8_t *xive_tm_views[] = {
    [XIVE_TM_HW_PAGE]   = xive_tm_hw_view,
    [XIVE_TM_HV_PAGE]   = xive_tm_hv_view,
    [XIVE_TM_OS_PAGE]   = xive_tm_os_view,
    [XIVE_TM_USER_PAGE] = xive_tm_user_view,
};

/*
 * Computes a register access mask for a given offset in the TIMA
 */
static uint64_t xive_tm_mask(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint8_t reg_offset = offset & 0x3F;
    uint8_t reg_mask = write ? 0x1 : 0x2;
    uint64_t mask = 0x0;
    int i;

    for (i = 0; i < size; i++) {
        if (xive_tm_views[page_offset][reg_offset + i] & reg_mask) {
            mask |= (uint64_t) 0xff << (8 * (size - i - 1));
        }
    }

    return mask;
}
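
/*
 * A worked example against the tables above: a 4-byte access at
 * register offset 0x10 (TM_QW1_OS, covering the NSR, CPPR, IPB and
 * LSMFB bytes) issued from the OS page. For a load, all four bytes
 * are readable (codes 2 or 3), so the mask is 0xffffffff; for a
 * store, only the CPPR byte is writable (code 3), so the mask is
 * 0x00ff0000 and the other bytes are filtered out.
 */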

static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, true);
    int i;

    /*
     * Only 4-byte and 8-byte stores are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return;
    }

    /*
     * Use the register offset for the raw values and filter out
     * reserved values
     */
    for (i = 0; i < size; i++) {
        uint8_t byte_mask = (mask >> (8 * (size - i - 1)));
        if (byte_mask) {
            tctx->regs[reg_offset + i] = (value >> (8 * (size - i - 1))) &
                byte_mask;
        }
    }
}

static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    uint8_t ring_offset = offset & 0x30;
    uint8_t reg_offset = offset & 0x3F;
    uint64_t mask = xive_tm_mask(offset, size, false);
    uint64_t ret;
    int i;

    /*
     * Only 4-byte and 8-byte loads are allowed and the User ring is
     * excluded
     */
    if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
                      HWADDR_PRIx"\n", offset);
        return -1;
    }

    /* Use the register offset for the raw values */
    ret = 0;
    for (i = 0; i < size; i++) {
        ret |= (uint64_t) tctx->regs[reg_offset + i] << (8 * (size - i - 1));
    }

    /* filter out reserved values */
    return ret & mask;
}

/*
 * The TM context is mapped twice within each page. Stores and loads
 * to the first mapping below 2K write and read the specified values
 * without modification. The second mapping above 2K performs specific
 * state changes (side effects) in addition to setting/returning the
 * interrupt management area context of the processor thread.
 */
static uint64_t xive_tm_ack_os_reg(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    return xive_tctx_accept(tctx, TM_QW1_OS);
}

static void xive_tm_set_os_cppr(XiveTCTX *tctx, hwaddr offset,
                                uint64_t value, unsigned size)
{
    xive_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff);
}

/*
 * Adjust the IPB to allow a CPU to process event queues of other
 * priorities during one physical interrupt cycle.
 */
static void xive_tm_set_os_pending(XiveTCTX *tctx, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    ipb_update(&tctx->regs[TM_QW1_OS], value & 0xff);
    xive_tctx_notify(tctx, TM_QW1_OS);
}

static uint64_t xive_tm_pull_os_ctx(XiveTCTX *tctx, hwaddr offset,
                                    unsigned size)
{
    uint32_t qw1w2_prev = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2;

    qw1w2 = xive_set_field32(TM_QW1W2_VO, qw1w2_prev, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
    return qw1w2;
}

/*
 * Define a mapping of "special" operations depending on the TIMA page
 * offset and the size of the operation.
 */
typedef struct XiveTmOp {
    uint8_t  page_offset;
    uint32_t op_offset;
    unsigned size;
    void     (*write_handler)(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                              unsigned size);
    uint64_t (*read_handler)(XiveTCTX *tctx, hwaddr offset, unsigned size);
} XiveTmOp;

static const XiveTmOp xive_tm_operations[] = {
    /*
     * MMIOs below 2K : raw values and special operations without side
     * effects
     */
    { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, NULL },
    { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, xive_tm_vt_poll },

    /* MMIOs above 2K : special operations with side effects */
    { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG,     2, NULL, xive_tm_ack_os_reg },
    { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, NULL },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX,    4, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX,    8, NULL, xive_tm_pull_os_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG,     2, NULL, xive_tm_ack_hv_reg },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX,  4, NULL, xive_tm_pull_pool_ctx },
    { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX,  8, NULL, xive_tm_pull_pool_ctx },
};

static const XiveTmOp *xive_tm_find_op(hwaddr offset, unsigned size, bool write)
{
    uint8_t page_offset = (offset >> TM_SHIFT) & 0x3;
    uint32_t op_offset = offset & 0xFFF;
    int i;

    for (i = 0; i < ARRAY_SIZE(xive_tm_operations); i++) {
        const XiveTmOp *xto = &xive_tm_operations[i];

        /* Accesses done from a more privileged TIMA page are allowed */
        if (xto->page_offset >= page_offset &&
            xto->op_offset == op_offset &&
            xto->size == size &&
            ((write && xto->write_handler) || (!write && xto->read_handler))) {
            return xto;
        }
    }
    return NULL;
}
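
/*
 * A note on the privilege test above: the TIMA pages are indexed
 * from most to least privileged (HW, HV, OS, USER), so the
 * xto->page_offset >= page_offset check lets a more privileged page
 * use the operations of a less privileged one. For instance,
 * TM_SPC_ACK_OS_REG, declared for the OS page, also matches when
 * issued from the HV page, while the reverse is rejected.
 */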

/*
 * TIMA MMIO handlers
 */
void xive_tctx_tm_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
                        unsigned size)
{
    const XiveTmOp *xto;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, true);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
        } else {
            xto->write_handler(tctx, offset, value, size);
        }
        return;
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, true);
    if (xto) {
        xto->write_handler(tctx, offset, value, size);
        return;
    }

    /*
     * Finish with raw access to the register values
     */
    xive_tm_raw_write(tctx, offset, value, size);
}

uint64_t xive_tctx_tm_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
{
    const XiveTmOp *xto;

    /*
     * TODO: check V bit in Q[0-3]W2
     */

    /*
     * First, check for special operations in the 2K region
     */
    if (offset & 0x800) {
        xto = xive_tm_find_op(offset, size, false);
        if (!xto) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA "
                          "@%"HWADDR_PRIx"\n", offset);
            return -1;
        }
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Then, for special operations in the region below 2K.
     */
    xto = xive_tm_find_op(offset, size, false);
    if (xto) {
        return xto->read_handler(tctx, offset, size);
    }

    /*
     * Finish with raw access to the register values
     */
    return xive_tm_raw_read(tctx, offset, size);
}

static void xive_tm_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    XiveTCTX *tctx = xive_router_get_tctx(XIVE_ROUTER(opaque), current_cpu);

    return xive_tctx_tm_read(tctx, offset, size);
}

const MemoryRegionOps xive_tm_ops = {
    .read = xive_tm_read,
    .write = xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static char *xive_tctx_ring_print(uint8_t *ring)
{
    uint32_t w2 = xive_tctx_word2(ring);

    return g_strdup_printf("%02x %02x %02x %02x %02x "
                           "%02x %02x %02x %08x",
                           ring[TM_NSR], ring[TM_CPPR], ring[TM_IPB],
                           ring[TM_LSMFB], ring[TM_ACK_CNT], ring[TM_INC],
                           ring[TM_AGE], ring[TM_PIPR], be32_to_cpu(w2));
}

static const char * const xive_tctx_ring_names[] = {
    "USER", "OS", "POOL", "PHYS",
};

void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon)
{
    int cpu_index = tctx->cs ? tctx->cs->cpu_index : -1;
    int i;

    if (kvm_irqchip_in_kernel()) {
        Error *local_err = NULL;

        kvmppc_xive_cpu_synchronize_state(tctx, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return;
        }
    }

    monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR"
                   " W2\n", cpu_index);

    for (i = 0; i < XIVE_TM_RING_COUNT; i++) {
        char *s = xive_tctx_ring_print(&tctx->regs[i * XIVE_TM_RING_SIZE]);
        monitor_printf(mon, "CPU[%04x]: %4s %s\n", cpu_index,
                       xive_tctx_ring_names[i], s);
        g_free(s);
    }
}

static void xive_tctx_reset(void *dev)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);

    memset(tctx->regs, 0, sizeof(tctx->regs));

    /* Set some defaults */
    tctx->regs[TM_QW1_OS + TM_LSMFB] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_ACK_CNT] = 0xFF;
    tctx->regs[TM_QW1_OS + TM_AGE] = 0xFF;

    /*
     * Initialize PIPR to 0xFF to avoid phantom interrupts when the
     * CPPR is first set.
     */
    tctx->regs[TM_QW1_OS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]);
    tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] =
        ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]);
}

static void xive_tctx_realize(DeviceState *dev, Error **errp)
{
    XiveTCTX *tctx = XIVE_TCTX(dev);
    PowerPCCPU *cpu;
    CPUPPCState *env;
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "cpu", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'cpu' not found: ");
        return;
    }

    cpu = POWERPC_CPU(obj);
    tctx->cs = CPU(obj);

    env = &cpu->env;
    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER9:
        tctx->hv_output = env->irq_inputs[POWER9_INPUT_HINT];
        tctx->os_output = env->irq_inputs[POWER9_INPUT_INT];
        break;

    default:
        error_setg(errp, "XIVE interrupt controller does not support "
                   "this CPU bus model");
        return;
    }

    /* Connect the presenter to the VCPU (required for CPU hotplug) */
    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_connect(tctx, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    qemu_register_reset(xive_tctx_reset, dev);
}

static void xive_tctx_unrealize(DeviceState *dev, Error **errp)
{
    qemu_unregister_reset(xive_tctx_reset, dev);
}

static int vmstate_xive_tctx_pre_save(void *opaque)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static int vmstate_xive_tctx_post_load(void *opaque, int version_id)
{
    Error *local_err = NULL;

    if (kvm_irqchip_in_kernel()) {
        /*
         * Required for hotplugged CPU, for which the state comes
         * after all states of the machine.
         */
        kvmppc_xive_cpu_set_state(XIVE_TCTX(opaque), &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_xive_tctx = {
    .name = TYPE_XIVE_TCTX,
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = vmstate_xive_tctx_pre_save,
    .post_load = vmstate_xive_tctx_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(regs, XiveTCTX),
        VMSTATE_END_OF_LIST()
    },
};

static void xive_tctx_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE Interrupt Thread Context";
    dc->realize = xive_tctx_realize;
    dc->unrealize = xive_tctx_unrealize;
    dc->vmsd = &vmstate_xive_tctx;
}

static const TypeInfo xive_tctx_info = {
    .name          = TYPE_XIVE_TCTX,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveTCTX),
    .class_init    = xive_tctx_class_init,
};

Object *xive_tctx_create(Object *cpu, XiveRouter *xrtr, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(TYPE_XIVE_TCTX);
    object_property_add_child(cpu, TYPE_XIVE_TCTX, obj, &error_abort);
    object_unref(obj);
    object_property_add_const_link(obj, "cpu", cpu, &error_abort);
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return obj;

error:
    object_unparent(obj);
    error_propagate(errp, local_err);
    return NULL;
}

/*
 * XIVE ESB helpers
 */

static uint8_t xive_esb_set(uint8_t *pq, uint8_t value)
{
    uint8_t old_pq = *pq & 0x3;

    *pq &= ~0x3;
    *pq |= value & 0x3;

    return old_pq;
}

static bool xive_esb_trigger(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_PENDING:
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_QUEUED);
        return false;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}

static bool xive_esb_eoi(uint8_t *pq)
{
    uint8_t old_pq = *pq & 0x3;

    switch (old_pq) {
    case XIVE_ESB_RESET:
    case XIVE_ESB_PENDING:
        xive_esb_set(pq, XIVE_ESB_RESET);
        return false;
    case XIVE_ESB_QUEUED:
        xive_esb_set(pq, XIVE_ESB_PENDING);
        return true;
    case XIVE_ESB_OFF:
        xive_esb_set(pq, XIVE_ESB_OFF);
        return false;
    default:
        g_assert_not_reached();
    }
}
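
/*
 * A compact view of the PQ automaton implemented above, assuming the
 * usual encoding (RESET=00, OFF=01, PENDING=10, QUEUED=11), with
 * "notify" marking the transitions that forward an event:
 *
 *   trigger: 00 (RESET)   -> 10 (PENDING), notify
 *            10 (PENDING) -> 11 (QUEUED)
 *            11 (QUEUED)  -> 11 (QUEUED)
 *            01 (OFF)     -> 01 (OFF)
 *
 *   EOI:     00/10        -> 00 (RESET)
 *            11 (QUEUED)  -> 10 (PENDING), notify
 *            01 (OFF)     -> 01 (OFF)
 */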

/*
 * XIVE Interrupt Source (or IVSE)
 */

uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno)
{
    assert(srcno < xsrc->nr_irqs);

    return xsrc->status[srcno] & 0x3;
}

uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq)
{
    assert(srcno < xsrc->nr_irqs);

    return xive_esb_set(&xsrc->status[srcno], pq);
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
{
    uint8_t old_pq = xive_source_esb_get(xsrc, srcno);

    xsrc->status[srcno] |= XIVE_STATUS_ASSERTED;

    switch (old_pq) {
    case XIVE_ESB_RESET:
        xive_source_esb_set(xsrc, srcno, XIVE_ESB_PENDING);
        return true;
    default:
        return false;
    }
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_trigger(&xsrc->status[srcno]);

    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xive_source_esb_get(xsrc, srcno) == XIVE_ESB_QUEUED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: queued an event on LSI IRQ %d\n", srcno);
    }

    return ret;
}

/*
 * Returns whether the event notification should be forwarded.
 */
static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
{
    bool ret;

    assert(srcno < xsrc->nr_irqs);

    ret = xive_esb_eoi(&xsrc->status[srcno]);

    /*
     * LSI sources do not set the Q bit but they can still be
     * asserted, in which case we should forward a new event
     * notification
     */
    if (xive_source_irq_is_lsi(xsrc, srcno) &&
        xsrc->status[srcno] & XIVE_STATUS_ASSERTED) {
        ret = xive_source_lsi_trigger(xsrc, srcno);
    }

    return ret;
}

/*
 * Forward the source event notification to the Router
 */
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
    XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);

    if (xnc->notify) {
        xnc->notify(xsrc->xive, srcno);
    }
}

/*
 * In a two-page ESB MMIO setting, the even page is the trigger page
 * and the odd page is for management
 */
static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static inline bool xive_source_is_trigger_page(XiveSource *xsrc, hwaddr addr)
{
    return xive_source_esb_has_2page(xsrc) &&
        addr_is_even(addr, xsrc->esb_shift - 1);
}
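
/*
 * A layout sketch, assuming the XIVE_ESB_64K_2PAGE setting (an
 * esb_shift of 17): each source then owns a 128K window and srcno is
 * addr >> 17. The lower (even) 64K page can only trigger, while the
 * upper (odd) 64K page decodes the management commands listed below.
 */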

/*
 * ESB MMIO loads
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       -1              EOI and return 0|1
 * 0x400 .. 0x7FF       -1              EOI and return 0|1
 * 0x800 .. 0xBFF       -1              return PQ
 * 0xC00 .. 0xCFF       -1              return PQ and atomically PQ=00
 * 0xD00 .. 0xDFF       -1              return PQ and atomically PQ=01
 * 0xE00 .. 0xEFF       -1              return PQ and atomically PQ=10
 * 0xF00 .. 0xFFF       -1              return PQ and atomically PQ=11
 */
static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    uint64_t ret = -1;

    /* In a two-page ESB MMIO setting, the trigger page should not be read */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: invalid load on IRQ %d trigger page at "
                      "0x%"HWADDR_PRIx"\n", srcno, addr);
        return -1;
    }

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_source_esb_eoi(xsrc, srcno);

        /* Forward the source event notification for routing */
        if (ret) {
            xive_source_notify(xsrc, srcno);
        }
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = xive_source_esb_get(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB load addr %x\n",
                      offset);
    }

    return ret;
}

/*
 * ESB MMIO stores
 *                      Trigger page    Management/EOI page
 *
 * ESB MMIO setting     2 pages         1 or 2 pages
 *
 * 0x000 .. 0x3FF       Trigger         Trigger
 * 0x400 .. 0x7FF       Trigger         EOI
 * 0x800 .. 0xBFF       Trigger         undefined
 * 0xC00 .. 0xCFF       Trigger         PQ=00
 * 0xD00 .. 0xDFF       Trigger         PQ=01
 * 0xE00 .. 0xEFF       Trigger         PQ=10
 * 0xF00 .. 0xFFF       Trigger         PQ=11
 */
static void xive_source_esb_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint32_t srcno = addr >> xsrc->esb_shift;
    bool notify = false;

    /* In a two-page ESB MMIO setting, the trigger page only triggers */
    if (xive_source_is_trigger_page(xsrc, addr)) {
        notify = xive_source_esb_trigger(xsrc, srcno);
        goto out;
    }

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_source_esb_trigger(xsrc, srcno);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        if (!(xsrc->esb_flags & XIVE_SRC_STORE_EOI)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: invalid Store EOI for IRQ %d\n", srcno);
            return;
        }

        notify = xive_source_esb_eoi(xsrc, srcno);
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        xive_source_esb_set(xsrc, srcno, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr %x\n",
                      offset);
        return;
    }

out:
    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}

static const MemoryRegionOps xive_source_esb_ops = {
    .read = xive_source_esb_read,
    .write = xive_source_esb_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

void xive_source_set_irq(void *opaque, int srcno, int val)
{
    XiveSource *xsrc = XIVE_SOURCE(opaque);
    bool notify = false;

    if (xive_source_irq_is_lsi(xsrc, srcno)) {
        if (val) {
            notify = xive_source_lsi_trigger(xsrc, srcno);
        } else {
            xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED;
        }
    } else {
        if (val) {
            notify = xive_source_esb_trigger(xsrc, srcno);
        }
    }

    /* Forward the source event notification for routing */
    if (notify) {
        xive_source_notify(xsrc, srcno);
    }
}
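
/*
 * xive_source_set_irq() has the qemu_irq handler signature, so a
 * device model would typically be wired to it with something like
 * the following sketch (names illustrative):
 *
 *   qemu_irq *irqs = qemu_allocate_irqs(xive_source_set_irq, xsrc,
 *                                       xsrc->nr_irqs);
 *   qemu_set_irq(irqs[srcno], 1);  // assert: MSI trigger or LSI raise
 *   qemu_set_irq(irqs[srcno], 0);  // deassert: only tracked for LSIs
 */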

void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon)
{
    int i;

    for (i = 0; i < xsrc->nr_irqs; i++) {
        uint8_t pq = xive_source_esb_get(xsrc, i);

        if (pq == XIVE_ESB_OFF) {
            continue;
        }

        monitor_printf(mon, "  %08x %s %c%c%c\n", i + offset,
                       xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
                       pq & XIVE_ESB_VAL_P ? 'P' : '-',
                       pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                       xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ');
    }
}

static void xive_source_reset(void *dev)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);

    /* Do not clear the LSI bitmap */

    /* PQs are initialized to 0b01 (Q=1) which corresponds to "ints off" */
    memset(xsrc->status, XIVE_ESB_OFF, xsrc->nr_irqs);
}

static void xive_source_realize(DeviceState *dev, Error **errp)
{
    XiveSource *xsrc = XIVE_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xive = XIVE_NOTIFIER(obj);

    if (!xsrc->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_4K_2PAGE &&
        xsrc->esb_shift != XIVE_ESB_64K &&
        xsrc->esb_shift != XIVE_ESB_64K_2PAGE) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    xsrc->status = g_malloc0(xsrc->nr_irqs);
    xsrc->lsi_map = bitmap_new(xsrc->nr_irqs);

    if (!kvm_irqchip_in_kernel()) {
        memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                              &xive_source_esb_ops, xsrc, "xive.esb",
                              (1ull << xsrc->esb_shift) * xsrc->nr_irqs);
    }

    qemu_register_reset(xive_source_reset, dev);
}

static const VMStateDescription vmstate_xive_source = {
    .name = TYPE_XIVE_SOURCE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL),
        VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs),
        VMSTATE_END_OF_LIST()
    },
};

/*
 * The default XIVE interrupt source setting for the ESB MMIOs is two
 * 64k pages without Store EOI, to be in sync with KVM.
 */
static Property xive_source_properties[] = {
    DEFINE_PROP_UINT64("flags", XiveSource, esb_flags, 0),
    DEFINE_PROP_UINT32("nr-irqs", XiveSource, nr_irqs, 0),
    DEFINE_PROP_UINT32("shift", XiveSource, esb_shift, XIVE_ESB_64K_2PAGE),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE Interrupt Source";
    dc->props   = xive_source_properties;
    dc->realize = xive_source_realize;
    dc->vmsd    = &vmstate_xive_source;
}

static const TypeInfo xive_source_info = {
    .name          = TYPE_XIVE_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveSource),
    .class_init    = xive_source_class_init,
};

/*
 * XiveEND helpers
 */

void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata))) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
    uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

    if (!xive_end_is_valid(end)) {
        return;
    }

    monitor_printf(mon, "  %08x %c%c%c%c%c prio:%d nvt:%04x eq:@%08"PRIx64
                   "% 6d/%5d ^%d", end_idx,
                   xive_end_is_valid(end)    ? 'v' : '-',
                   xive_end_is_enqueue(end)  ? 'q' : '-',
                   xive_end_is_notify(end)   ? 'n' : '-',
                   xive_end_is_backlog(end)  ? 'b' : '-',
                   xive_end_is_escalate(end) ? 'e' : '-',
                   priority, nvt, qaddr_base, qindex, qentries, qgen);

    xive_end_queue_pic_print_info(end, 6, mon);
    monitor_printf(mon, "]\n");
}

static void xive_end_enqueue(XiveEND *end, uint32_t data)
{
    uint64_t qaddr_base = xive_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
    uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END_W1_GENERATION, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END_W1_PAGE_OFF, end->w1, qindex);
}
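
/*
 * Queue geometry sketch for the enqueue above: a QSIZE field of 0
 * selects 1 << (0 + 10) = 1024 four-byte entries, i.e. a 4K event
 * queue page. Each entry carries the generation bit in bit 31 and 31
 * bits of END data; when qindex wraps to 0 the generation flips,
 * which is how the consumer distinguishes new entries from stale
 * ones.
 */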

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        XiveEAS *eas)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                        XiveEND *end)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                          XiveEND *end, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive_router_get_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                        XiveNVT *nvt)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvt(xrtr, nvt_blk, nvt_idx, nvt);
}

int xive_router_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, uint32_t nvt_idx,
                          XiveNVT *nvt, uint8_t word_number)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvt(xrtr, nvt_blk, nvt_idx, nvt, word_number);
}

XiveTCTX *xive_router_get_tctx(XiveRouter *xrtr, CPUState *cs)
{
    XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

    return xrc->get_tctx(xrtr, cs);
}

/*
 * Encode the HW CAM line in the block group mode format:
 *
 *   chip << 19 | 0000000 0 0001 thread (7 bits)
 */
static uint32_t xive_tctx_hw_cam_line(XiveTCTX *tctx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;

    return xive_nvt_cam_line((pir >> 8) & 0xf, 1 << 7 | (pir & 0x7f));
}
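
/*
 * A worked example with illustrative values: a PIR of 0x0108 encodes
 * chip 1 and thread 8, so the CAM line would be
 * xive_nvt_cam_line(1, 1 << 7 | 8), i.e. 1 << 19 | 0x88 = 0x80088.
 */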

/*
 * The thread context register words are in big-endian format.
 */
static int xive_presenter_tctx_match(XiveTCTX *tctx, uint8_t format,
                                     uint8_t nvt_blk, uint32_t nvt_idx,
                                     bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam = xive_nvt_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM_QW3W2_VT) &&
            cam == xive_tctx_hw_cam_line(tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM_QW2W2_VP) &&
            cam == xive_get_field32(TM_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM_QW1W2_VO) &&
            (cam == xive_get_field32(TM_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

typedef struct XiveTCTXMatch {
    XiveTCTX *tctx;
    uint8_t ring;
} XiveTCTXMatch;

static bool xive_presenter_match(XiveRouter *xrtr, uint8_t format,
                                 uint8_t nvt_blk, uint32_t nvt_idx,
                                 bool cam_ignore, uint8_t priority,
                                 uint32_t logic_serv, XiveTCTXMatch *match)
{
    CPUState *cs;

    /*
     * TODO (PowerNV): handle chip_id overwrite of block field for
     * hardwired CAM compares
     */

    CPU_FOREACH(cs) {
        XiveTCTX *tctx = xive_router_get_tctx(xrtr, cs);
        int ring;

        /*
         * HW checks that the CPU is enabled in the Physical Thread
         * Enable Register (PTER).
         */

        /*
         * Check the thread context CAM lines and record matches. We
         * will handle CPU exception delivery later
         */
        ring = xive_presenter_tctx_match(tctx, format, nvt_blk, nvt_idx,
                                         cam_ignore, logic_serv);
        /*
         * Save the context and follow on to catch duplicates, which
         * we don't support yet.
         */
        if (ring != -1) {
            if (match->tctx) {
                qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
                              "context NVT %x/%x\n", nvt_blk, nvt_idx);
                return false;
            }

            match->ring = ring;
            match->tctx = tctx;
        }
    }

    if (!match->tctx) {
        qemu_log_mask(LOG_UNIMP, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
        return false;
    }

    return true;
}

/*
 * This is our simple Xive Presenter Engine model. It is merged in the
 * Router as it does not require an extra object.
 *
 * It receives notification requests sent by the IVRE to find one
 * matching NVT (or more) dispatched on the processor threads. In case
 * of a single NVT notification, the process is abbreviated and the
 * thread is signaled if a match is found. In case of a logical server
 * notification (bits ignored at the end of the NVT identifier), the
 * IVPE and IVRE select a winning thread using different filters. This
 * involves 2 or 3 exchanges on the PowerBus that the model does not
 * support.
 *
 * The parameters represent what is sent on the PowerBus
 */
static bool xive_presenter_notify(XiveRouter *xrtr, uint8_t format,
                                  uint8_t nvt_blk, uint32_t nvt_idx,
                                  bool cam_ignore, uint8_t priority,
                                  uint32_t logic_serv)
{
    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
    bool found;

    found = xive_presenter_match(xrtr, format, nvt_blk, nvt_idx, cam_ignore,
                                 priority, logic_serv, &match);
    if (found) {
        ipb_update(&match.tctx->regs[match.ring], priority);
        xive_tctx_notify(match.tctx, match.ring);
    }

    return found;
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provide further coalescing in the
 * Router.
 */
static bool xive_router_end_es_notify(XiveRouter *xrtr, uint8_t end_blk,
                                      uint32_t end_idx, XiveEND *end,
                                      uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
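
/*
 * Coalescing sketch: the END ESn/ESe pair follows the same PQ
 * automaton as a source ESB. The first trigger moves the state from
 * RESET to PENDING and notifies; subsequent triggers are absorbed in
 * QUEUED until software resets the bits through the END ESB
 * management page handled further down in this file.
 */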

/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk,
                                   uint32_t end_idx, uint32_t end_data)
{
    XiveEND end;
    uint8_t priority;
    uint8_t format;
    uint8_t nvt_blk;
    uint32_t nvt_idx;
    XiveNVT nvt;
    bool found;

    /* END cache lookup */
    if (xive_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive_end_is_enqueue(&end)) {
        xive_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     * F=0 : single or multiple NVT notification
     * F=1 : User level Event-Based Branch (EBB) notification, no
     *       priority
     */
    format = xive_get_field32(END_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end.w6);
    nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end.w6);

    /* NVT cache lookup */
    if (xive_router_get_nvt(xrtr, nvt_blk, nvt_idx, &nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVT %x/%x\n",
                      nvt_blk, nvt_idx);
        return;
    }

    if (!xive_nvt_is_valid(&nvt)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is invalid\n",
                      nvt_blk, nvt_idx);
        return;
    }

    found = xive_presenter_notify(xrtr, format, nvt_blk, nvt_idx,
                          xive_get_field32(END_W7_F0_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVT is dispatched on a HW thread :
     * - specific VP: update the NVT structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive_end_is_backlog(&end)) {
        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }
        /* Record the IPB in the associated NVT structure */
        ipb_update((uint8_t *) &nvt.w4, priority);
        xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of notification */
        if (!xive_router_end_es_notify(xrtr, end_blk, end_idx,
                                       &end, END_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field32(END_W4_ESC_END_BLOCK, end.w4),
                           xive_get_field32(END_W4_ESC_END_INDEX, end.w4),
                           xive_get_field32(END_W5_ESC_END_DATA,  end.w5));
}

void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
{
    XiveRouter *xrtr = XIVE_ROUTER(xn);
    uint8_t eas_blk = XIVE_SRCNO_BLOCK(lisn);
    uint32_t eas_idx = XIVE_SRCNO_INDEX(lisn);
    XiveEAS eas;

    /* EAS cache lookup */
    if (xive_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    /*
     * The IVRE checks the State Bit Cache at this point. We skip the
     * SBC lookup because the state bits of the sources are modeled
     * internally in QEMU.
     */

    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
        return;
    }

    if (xive_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive_router_end_notify(xrtr,
                           xive_get_field64(EAS_END_BLOCK, eas.w),
                           xive_get_field64(EAS_END_INDEX, eas.w),
                           xive_get_field64(EAS_END_DATA,  eas.w));
}
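
/*
 * A rough sketch of the notification path modeled in this file, side
 * effects omitted:
 *
 *   xive_source_notify()              source PQ said "forward"
 *    -> xive_router_notify()          EAS lookup and masking
 *     -> xive_router_end_notify()     END/EQ handling, ESn coalescing
 *      -> xive_presenter_notify()     CAM match on the thread contexts
 *       -> xive_tctx_notify()         raise the CPU input line
 */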

static void xive_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc    = "XIVE Router Engine";
    xnc->notify = xive_router_notify;
}

static const TypeInfo xive_router_info = {
    .name          = TYPE_XIVE_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .class_size    = sizeof(XiveRouterClass),
    .class_init    = xive_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { }
    }
};

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, "  %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t)  xive_get_field64(EAS_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS_END_DATA, eas->w));
}

/*
 * END ESB MMIO loads
 */
static uint64_t xive_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    XiveEND end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret = -1;

    end_blk = xsrc->block_id;
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END_W1_ESn : END_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %x\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

/*
 * END ESB MMIO stores are invalid
 */
static void xive_end_source_write(void *opaque, hwaddr addr,
                                  uint64_t value, unsigned size)
{
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid ESB write addr 0x%"
                  HWADDR_PRIx"\n", addr);
}

static const MemoryRegionOps xive_end_source_ops = {
    .read = xive_end_source_read,
    .write = xive_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive_end_source_realize(DeviceState *dev, Error **errp)
{
    XiveENDSource *xsrc = XIVE_END_SOURCE(dev);
    Object *obj;
    Error *local_err = NULL;

    obj = object_property_get_link(OBJECT(dev), "xive", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'xive' not found: ");
        return;
    }

    xsrc->xrtr = XIVE_ROUTER(obj);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of ENDs needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

static Property xive_end_source_properties[] = {
    DEFINE_PROP_UINT8("block-id", XiveENDSource, block_id, 0),
    DEFINE_PROP_UINT32("nr-ends", XiveENDSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", XiveENDSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc    = "XIVE END Source";
    dc->props   = xive_end_source_properties;
    dc->realize = xive_end_source_realize;
}

static const TypeInfo xive_end_source_info = {
    .name          = TYPE_XIVE_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(XiveENDSource),
    .class_init    = xive_end_source_class_init,
};

/*
 * XIVE Notifier
 */
static const TypeInfo xive_notifier_info = {
    .name = TYPE_XIVE_NOTIFIER,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(XiveNotifierClass),
};

static void xive_register_types(void)
{
    type_register_static(&xive_source_info);
    type_register_static(&xive_notifier_info);
    type_register_static(&xive_router_info);
    type_register_static(&xive_end_source_info);
    type_register_static(&xive_tctx_info);
}

type_init(xive_register_types)