2 * ITS emulation for a GICv3-based system
4 * Copyright Linaro.org 2021
9 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
10 * option) any later version. See the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
17 #include "hw/qdev-properties.h"
18 #include "hw/intc/arm_gicv3_its_common.h"
19 #include "gicv3_internal.h"
20 #include "qom/object.h"
21 #include "qapi/error.h"
23 typedef struct GICv3ITSClass GICv3ITSClass;
24 /* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
25 DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
26 ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)
/*
 * Class struct: parent_reset stores the parent class's reset handler so
 * gicv3_its_reset() can chain to it (wired up in gicv3_its_class_init()).
 */
28 struct GICv3ITSClass {
29 GICv3ITSCommonClass parent_class;
30 void (*parent_reset)(DeviceState *dev);
34 * This is an internal enum used to distinguish between LPI triggered
35 * via command queue and LPI triggered via gits_translater write.
37 typedef enum ItsCmdType {
38 NONE = 0, /* internal indication for GITS_TRANSLATER write */
/*
 * DTEntry caches the decoded Device Table fields for one device:
 * valid flag, SIZE (eventid width - 1) and the ITT base address
 * (populated by get_dte()).
 */
49 typedef struct DTEntry {
56 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
57 * if a command parameter is not correct. These include both "stall
58 * processing of the command queue" and "ignore this command, and
59 * keep processing the queue". In our implementation we choose that
60 * memory transaction errors reading the command packet provoke a
61 * stall, but errors in parameters cause us to ignore the command
62 * and continue processing.
63 * The process_* functions which handle individual ITS commands all
64 * return an ItsCmdResult which tells process_cmdq() whether it should
65 * stall or keep going.
67 typedef enum ItsCmdResult {
/*
 * Decode the physical base address from a GITS_BASER value. The field
 * layout depends on the configured page size: 4K and 16K pages share a
 * single PHYADDR field (address bits from bit 12 up), while 64K pages
 * split the address into low and high fields.
 */
72 static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
77 case GITS_PAGE_SIZE_4K:
78 case GITS_PAGE_SIZE_16K:
79 result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
82 case GITS_PAGE_SIZE_64K:
83 result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
84 result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
93 static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
94 uint32_t idx, MemTxResult *res)
97 * Given a TableDesc describing one of the ITS in-guest-memory
98 * tables and an index into it, return the guest address
99 * corresponding to that table entry.
100 * If there was a memory error reading the L1 table of an
101 * indirect table, *res is set accordingly, and we return -1.
102 * If the L1 table entry is marked not valid, we return -1 with
103 * *res set to MEMTX_OK.
105 * The specification defines the format of level 1 entries of a
106 * 2-level table, but the format of level 2 entries and the format
107 * of flat-mapped tables is IMPDEF.
109 AddressSpace *as = &s->gicv3->dma_as;
112 uint32_t num_l2_entries;
117 /* Single level table */
118 return td->base_addr + idx * td->entry_sz;
121 /* Two level table */
/*
 * NOTE(review): the L1 index divisor here is (page_sz / L1TABLE_ENTRY_SIZE),
 * whereas the per-L2-page entry count below is (page_sz / entry_sz) --
 * confirm these are meant to differ.
 */
122 l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
124 l2 = address_space_ldq_le(as,
125 td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
126 MEMTXATTRS_UNSPECIFIED, res);
127 if (*res != MEMTX_OK) {
130 if (!(l2 & L2_TABLE_VALID_MASK)) {
134 num_l2_entries = td->page_sz / td->entry_sz;
/* mask keeps the low address bits of the L1 entry (the L2 page base) */
135 return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
/*
 * Read the Collection Table entry for @icid into *cte and return its
 * VALID field. Returns false without a DMA read if table_entry_addr()
 * could not resolve the entry (*res distinguishes a memory error from
 * "L1 entry not valid").
 */
138 static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
141 AddressSpace *as = &s->gicv3->dma_as;
142 uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, res);
144 if (entry_addr == -1) {
145 return false; /* not valid */
148 *cte = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, res);
149 return FIELD_EX64(*cte, CTE, VALID);
/*
 * Write the Interrupt Translation Table entry for @eventid in the ITT
 * addressed by @dte. Each ITE slot is sizeof(uint64_t) + sizeof(uint32_t)
 * bytes; the 64-bit word is stored first, then the 32-bit word.
 * Returns true iff both DMA writes completed with MEMTX_OK.
 *
 * NOTE(review): the 32-bit word is stored at slot offset
 * +sizeof(uint32_t), which overlaps the upper half of the 64-bit word.
 * get_ite() reads back with the same offsets, so the emulation is
 * self-consistent, but confirm the overlapped itel bits are never needed.
 */
152 static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
155 AddressSpace *as = &s->gicv3->dma_as;
156 MemTxResult res = MEMTX_OK;
158 address_space_stq_le(as, dte->ittaddr + (eventid * (sizeof(uint64_t) +
159 sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
162 if (res == MEMTX_OK) {
163 address_space_stl_le(as, dte->ittaddr + (eventid * (sizeof(uint64_t) +
164 sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
165 MEMTXATTRS_UNSPECIFIED, &res);
167 if (res != MEMTX_OK) {
/*
 * Read the ITE for @eventid from the ITT addressed by @dte, using the
 * same in-memory layout as update_ite(). On a valid physical-type entry,
 * fills in *pIntid (the LPI number) and *icid (the collection ID).
 * Return value is the entry's VALID/type check result; *res reports
 * DMA errors.
 */
174 static bool get_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
175 uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
177 AddressSpace *as = &s->gicv3->dma_as;
181 ite.itel = address_space_ldq_le(as, dte->ittaddr +
182 (eventid * (sizeof(uint64_t) +
183 sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
186 if (*res == MEMTX_OK) {
187 ite.iteh = address_space_ldl_le(as, dte->ittaddr +
188 (eventid * (sizeof(uint64_t) +
189 sizeof(uint32_t))) + sizeof(uint32_t),
190 MEMTXATTRS_UNSPECIFIED, res);
192 if (*res == MEMTX_OK) {
193 if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
194 int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
195 if (inttype == ITE_INTTYPE_PHYSICAL) {
196 *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
197 *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
207 * Read the Device Table entry at index @devid. On success (including
208 * successfully determining that there is no valid DTE for this index),
209 * we return MEMTX_OK and populate the DTEntry struct accordingly.
210 * If there is an error reading memory then we return the error code.
212 static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
214 MemTxResult res = MEMTX_OK;
215 AddressSpace *as = &s->gicv3->dma_as;
216 uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
219 if (entry_addr == -1) {
220 /* No L2 table entry, i.e. no valid DTE, or a memory error */
224 dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
225 if (res != MEMTX_OK) {
/* Decode the raw DTE word into the cached struct fields. */
228 dte->valid = FIELD_EX64(dteval, DTE, VALID);
229 dte->size = FIELD_EX64(dteval, DTE, SIZE);
230 /* DTE word field stores bits [51:8] of the ITT address */
231 dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
236 * This function handles the processing of following commands based on
237 * the ItsCmdType parameter passed:-
238 * 1. triggering of lpi interrupt translation via ITS INT command
239 * 2. triggering of lpi interrupt translation via gits_translater register
240 * 3. handling of ITS CLEAR command
241 * 4. handling of ITS DISCARD command
/*
 * The flow is: validate devid -> read DTE -> validate eventid against
 * the DTE's event range -> read ITE -> validate ICID -> read CTE ->
 * validate rdbase -> set or clear the LPI on the target redistributor.
 * Parameter errors ignore the command (CMD_CONTINUE); DMA errors stall.
 */
243 static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
244 uint32_t eventid, ItsCmdType cmd)
246 MemTxResult res = MEMTX_OK;
247 uint64_t num_eventids;
250 bool ite_valid = false;
252 bool cte_valid = false;
256 if (devid >= s->dt.num_entries) {
257 qemu_log_mask(LOG_GUEST_ERROR,
258 "%s: invalid command attributes: devid %d>=%d",
259 __func__, devid, s->dt.num_entries);
263 if (get_dte(s, devid, &dte) != MEMTX_OK) {
267 qemu_log_mask(LOG_GUEST_ERROR,
268 "%s: invalid command attributes: "
269 "invalid dte for %d\n", __func__, devid);
/* DTE.SIZE encodes (number of EventID bits - 1): valid ids are 0..2^(SIZE+1)-1 */
273 num_eventids = 1ULL << (dte.size + 1);
274 if (eventid >= num_eventids) {
275 qemu_log_mask(LOG_GUEST_ERROR,
276 "%s: invalid command attributes: eventid %d >= %"
278 __func__, eventid, num_eventids);
282 ite_valid = get_ite(s, eventid, &dte, &icid, &pIntid, &res);
283 if (res != MEMTX_OK) {
288 qemu_log_mask(LOG_GUEST_ERROR,
289 "%s: invalid command attributes: invalid ITE\n",
294 if (icid >= s->ct.num_entries) {
295 qemu_log_mask(LOG_GUEST_ERROR,
296 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
301 cte_valid = get_cte(s, icid, &cte, &res);
302 if (res != MEMTX_OK) {
306 qemu_log_mask(LOG_GUEST_ERROR,
307 "%s: invalid command attributes: "
308 "invalid cte: %"PRIx64"\n",
314 * Current implementation only supports rdbase == procnum
315 * Hence rdbase physical address is ignored
317 rdbase = FIELD_EX64(cte, CTE, RDBASE);
319 if (rdbase >= s->gicv3->num_cpu) {
/* CLEAR/DISCARD drop the pending LPI; INT/NONE set it pending */
323 if ((cmd == CLEAR) || (cmd == DISCARD)) {
324 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
326 gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
329 if (cmd == DISCARD) {
331 /* remove mapping from interrupt translation table */
332 return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
/*
 * Thin wrapper: extract devid and eventid from the command packet and
 * hand off to do_process_its_cmd() for INT/CLEAR/DISCARD handling.
 */
336 static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
339 uint32_t devid, eventid;
341 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
342 eventid = cmdpkt[1] & EVENTID_MASK;
343 return do_process_its_cmd(s, devid, eventid, cmd);
/*
 * Handle MAPTI/MAPI: validate devid/icid/eventid/pIntid, then write a
 * new valid physical ITE mapping eventid -> (pIntid, icid).
 * Callers pass false for MAPTI and true for MAPI (see process_cmdq());
 * the second-parameter name is not visible here -- presumably it selects
 * the MAPI behavior where eventid doubles as pINTID; confirm upstream.
 */
346 static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
349 uint32_t devid, eventid;
351 uint64_t num_eventids;
357 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
358 eventid = cmdpkt[1] & EVENTID_MASK;
363 pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
366 icid = cmdpkt[2] & ICID_MASK;
368 if (devid >= s->dt.num_entries) {
369 qemu_log_mask(LOG_GUEST_ERROR,
370 "%s: invalid command attributes: devid %d>=%d",
371 __func__, devid, s->dt.num_entries);
375 if (get_dte(s, devid, &dte) != MEMTX_OK) {
378 num_eventids = 1ULL << (dte.size + 1);
379 num_intids = 1ULL << (GICD_TYPER_IDBITS + 1);
/* pIntid must be a real LPI (>= 8192) or the spurious INTID */
381 if ((icid >= s->ct.num_entries)
382 || !dte.valid || (eventid >= num_eventids) ||
383 (((pIntid < GICV3_LPI_INTID_START) || (pIntid >= num_intids)) &&
384 (pIntid != INTID_SPURIOUS))) {
385 qemu_log_mask(LOG_GUEST_ERROR,
386 "%s: invalid command attributes "
387 "icid %d or eventid %d or pIntid %d or"
388 "unmapped dte %d\n", __func__, icid, eventid,
391 * in this implementation, in case of error
392 * we ignore this command and move onto the next
393 * command in the queue
398 /* add ite entry to interrupt translation table */
399 ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, true);
400 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
401 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
402 ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
403 ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
405 return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
/*
 * Write (or invalidate, when @valid is false) the Collection Table
 * entry for @icid, mapping the collection to redistributor @rdbase.
 * Returns true on success; false means a DMA error (caller stalls).
 * A missing L2 page is treated as "discard write and continue".
 */
408 static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
411 AddressSpace *as = &s->gicv3->dma_as;
414 MemTxResult res = MEMTX_OK;
421 /* add mapping entry to collection table */
422 cte = FIELD_DP64(cte, CTE, VALID, 1);
423 cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
426 entry_addr = table_entry_addr(s, &s->ct, icid, &res);
427 if (res != MEMTX_OK) {
428 /* memory access error: stall */
431 if (entry_addr == -1) {
432 /* No L2 table for this index: discard write and continue */
436 address_space_stq_le(as, entry_addr, cte, MEMTXATTRS_UNSPECIFIED, &res);
437 return res == MEMTX_OK;
/*
 * Handle MAPC: decode icid/rdbase/valid from the command packet and
 * update the Collection Table. Out-of-range icid or rdbase ignores
 * the command (CMD_CONTINUE); only a DMA error in update_cte() stalls.
 */
440 static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
446 icid = cmdpkt[2] & ICID_MASK;
448 rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
449 rdbase &= RDBASE_PROCNUM_MASK;
451 valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
453 if ((icid >= s->ct.num_entries) || (rdbase >= s->gicv3->num_cpu)) {
454 qemu_log_mask(LOG_GUEST_ERROR,
455 "ITS MAPC: invalid collection table attributes "
456 "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
458 * in this implementation, in case of error
459 * we ignore this command and move onto the next
460 * command in the queue
465 return update_cte(s, icid, valid, rdbase) ? CMD_CONTINUE : CMD_STALL;
/*
 * Write (or invalidate, when @valid is false) the Device Table entry
 * for @devid, recording the ITT base address and event-id SIZE field.
 * Returns true on success; false means a DMA error (caller stalls).
 * A missing L2 page is treated as "discard write and continue".
 */
468 static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
469 uint8_t size, uint64_t itt_addr)
471 AddressSpace *as = &s->gicv3->dma_as;
474 MemTxResult res = MEMTX_OK;
478 /* add mapping entry to device table */
479 dte = FIELD_DP64(dte, DTE, VALID, 1);
480 dte = FIELD_DP64(dte, DTE, SIZE, size);
481 dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
487 entry_addr = table_entry_addr(s, &s->dt, devid, &res);
488 if (res != MEMTX_OK) {
489 /* memory access error: stall */
492 if (entry_addr == -1) {
493 /* No L2 table for this index: discard write and continue */
496 address_space_stq_le(as, entry_addr, dte, MEMTXATTRS_UNSPECIFIED, &res);
497 return res == MEMTX_OK;
/*
 * Handle MAPD: decode devid/size/itt_addr/valid and update the Device
 * Table. Out-of-range devid, or a size exceeding GITS_TYPER.IDBITS,
 * ignores the command; only a DMA error in update_dte() stalls.
 */
500 static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
507 devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
508 size = cmdpkt[1] & SIZE_MASK;
509 itt_addr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
510 valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
512 if ((devid >= s->dt.num_entries) ||
513 (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
514 qemu_log_mask(LOG_GUEST_ERROR,
515 "ITS MAPD: invalid device table attributes "
516 "devid %d or size %d\n", devid, size);
518 * in this implementation, in case of error
519 * we ignore this command and move onto the next
520 * command in the queue
525 return update_dte(s, devid, valid, size, itt_addr) ? CMD_CONTINUE : CMD_STALL;
/*
 * Handle MOVALL: move all pending LPIs from redistributor RDBASE1 to
 * RDBASE2. Out-of-range rdbase values ignore the command; a move to
 * the same target is a no-op.
 */
528 static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
532 rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
533 rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
535 if (rd1 >= s->gicv3->num_cpu) {
536 qemu_log_mask(LOG_GUEST_ERROR,
537 "%s: RDBASE1 %" PRId64
538 " out of range (must be less than %d)\n",
539 __func__, rd1, s->gicv3->num_cpu);
542 if (rd2 >= s->gicv3->num_cpu) {
543 qemu_log_mask(LOG_GUEST_ERROR,
544 "%s: RDBASE2 %" PRId64
545 " out of range (must be less than %d)\n",
546 __func__, rd2, s->gicv3->num_cpu);
551 /* Move to same target must succeed as a no-op */
555 /* Move all pending LPIs from redistributor 1 to redistributor 2 */
556 gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
/*
 * Handle MOVI: retarget one mapped event (devid, eventid) from its
 * current collection to @new_icid. Validates devid, DTE, eventid, the
 * existing ITE, both old and new ICIDs and CTEs, and both rdbase
 * values; then moves the pending LPI between redistributors (if they
 * differ) and rewrites the ITE with the new ICID.
 */
561 static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
563 MemTxResult res = MEMTX_OK;
564 uint32_t devid, eventid, intid;
565 uint16_t old_icid, new_icid;
566 uint64_t old_cte, new_cte;
567 uint64_t old_rdbase, new_rdbase;
568 bool ite_valid, cte_valid;
569 uint64_t num_eventids;
573 devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
574 eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
575 new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
577 if (devid >= s->dt.num_entries) {
578 qemu_log_mask(LOG_GUEST_ERROR,
579 "%s: invalid command attributes: devid %d>=%d",
580 __func__, devid, s->dt.num_entries);
583 if (get_dte(s, devid, &dte) != MEMTX_OK) {
588 qemu_log_mask(LOG_GUEST_ERROR,
589 "%s: invalid command attributes: "
590 "invalid dte for %d\n", __func__, devid);
594 num_eventids = 1ULL << (dte.size + 1);
595 if (eventid >= num_eventids) {
596 qemu_log_mask(LOG_GUEST_ERROR,
597 "%s: invalid command attributes: eventid %d >= %"
599 __func__, eventid, num_eventids);
603 ite_valid = get_ite(s, eventid, &dte, &old_icid, &intid, &res);
604 if (res != MEMTX_OK) {
609 qemu_log_mask(LOG_GUEST_ERROR,
610 "%s: invalid command attributes: invalid ITE\n",
615 if (old_icid >= s->ct.num_entries) {
616 qemu_log_mask(LOG_GUEST_ERROR,
617 "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
622 if (new_icid >= s->ct.num_entries) {
623 qemu_log_mask(LOG_GUEST_ERROR,
624 "%s: invalid command attributes: ICID 0x%x\n",
629 cte_valid = get_cte(s, old_icid, &old_cte, &res);
630 if (res != MEMTX_OK) {
634 qemu_log_mask(LOG_GUEST_ERROR,
635 "%s: invalid command attributes: "
636 "invalid cte: %"PRIx64"\n",
641 cte_valid = get_cte(s, new_icid, &new_cte, &res);
642 if (res != MEMTX_OK) {
646 qemu_log_mask(LOG_GUEST_ERROR,
647 "%s: invalid command attributes: "
648 "invalid cte: %"PRIx64"\n",
653 old_rdbase = FIELD_EX64(old_cte, CTE, RDBASE);
654 if (old_rdbase >= s->gicv3->num_cpu) {
655 qemu_log_mask(LOG_GUEST_ERROR,
656 "%s: CTE has invalid rdbase 0x%"PRIx64"\n",
657 __func__, old_rdbase);
661 new_rdbase = FIELD_EX64(new_cte, CTE, RDBASE);
662 if (new_rdbase >= s->gicv3->num_cpu) {
663 qemu_log_mask(LOG_GUEST_ERROR,
664 "%s: CTE has invalid rdbase 0x%"PRIx64"\n",
665 __func__, new_rdbase);
669 if (old_rdbase != new_rdbase) {
670 /* Move the LPI from the old redistributor to the new one */
671 gicv3_redist_mov_lpi(&s->gicv3->cpu[old_rdbase],
672 &s->gicv3->cpu[new_rdbase],
676 /* Update the ICID field in the interrupt translation table entry */
677 ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, 1);
678 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
679 ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, intid);
680 ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
681 ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, new_icid);
682 return update_ite(s, eventid, &dte, ite) ? CMD_CONTINUE : CMD_STALL;
686 * Current implementation blocks until all
687 * commands are processed
/*
 * Drain the command queue: for each entry between CREADR and CWRITER,
 * map the guest command packet, dispatch it by opcode, and either
 * advance CREADR (CMD_CONTINUE) or set the STALLED bit and stop
 * (CMD_STALL). Does nothing while the ITS is disabled.
 */
689 static void process_cmdq(GICv3ITSState *s)
691 uint32_t wr_offset = 0;
692 uint32_t rd_offset = 0;
693 uint32_t cq_offset = 0;
694 AddressSpace *as = &s->gicv3->dma_as;
698 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
702 wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
704 if (wr_offset >= s->cq.num_entries) {
705 qemu_log_mask(LOG_GUEST_ERROR,
706 "%s: invalid write offset "
707 "%d\n", __func__, wr_offset);
711 rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
713 if (rd_offset >= s->cq.num_entries) {
714 qemu_log_mask(LOG_GUEST_ERROR,
715 "%s: invalid read offset "
716 "%d\n", __func__, rd_offset);
720 while (wr_offset != rd_offset) {
721 ItsCmdResult result = CMD_CONTINUE;
724 uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
726 cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
/* Map the whole 32-byte command packet; failure to map stalls the queue */
728 buflen = GITS_CMDQ_ENTRY_SIZE;
729 hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
730 &buflen, false, MEMTXATTRS_UNSPECIFIED);
731 if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
733 address_space_unmap(as, hostmem, buflen, false, 0);
735 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
736 qemu_log_mask(LOG_GUEST_ERROR,
737 "%s: could not read command at 0x%" PRIx64 "\n",
738 __func__, s->cq.base_addr + cq_offset);
741 for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
742 cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
744 address_space_unmap(as, hostmem, buflen, false, 0);
746 cmd = cmdpkt[0] & CMD_MASK;
748 trace_gicv3_its_process_command(rd_offset, cmd);
752 result = process_its_cmd(s, cmdpkt, INTERRUPT);
755 result = process_its_cmd(s, cmdpkt, CLEAR);
759 * Current implementation makes a blocking synchronous call
760 * for every command issued earlier, hence the internal state
761 * is already consistent by the time SYNC command is executed.
762 * Hence no further processing is required for SYNC command.
766 result = process_mapd(s, cmdpkt);
769 result = process_mapc(s, cmdpkt);
772 result = process_mapti(s, cmdpkt, false);
775 result = process_mapti(s, cmdpkt, true);
777 case GITS_CMD_DISCARD:
778 result = process_its_cmd(s, cmdpkt, DISCARD);
781 case GITS_CMD_INVALL:
783 * Current implementation doesn't cache any ITS tables,
784 * but the calculated lpi priority information. We only
785 * need to trigger lpi priority re-calculation to be in
786 * sync with LPI config table or pending table changes.
788 for (i = 0; i < s->gicv3->num_cpu; i++) {
789 gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
793 result = process_movi(s, cmdpkt);
795 case GITS_CMD_MOVALL:
796 result = process_movall(s, cmdpkt);
/* Advance the read pointer on success; stall and stop otherwise */
801 if (result == CMD_CONTINUE) {
803 rd_offset %= s->cq.num_entries;
804 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
807 s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
808 qemu_log_mask(LOG_GUEST_ERROR,
809 "%s: 0x%x cmd processing failed, stalling\n",
817 * This function extracts the ITS Device and Collection table specific
818 * parameters (like base_addr, size etc) from GITS_BASER register.
819 * It is called during ITS enable and also during post_load migration
821 static void extract_table_params(GICv3ITSState *s)
823 uint16_t num_pages = 0;
824 uint8_t page_sz_type;
826 uint32_t page_sz = 0;
/* Walk all eight GITS_BASER<n> registers and decode each one */
829 for (int i = 0; i < 8; i++) {
839 page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);
841 switch (page_sz_type) {
843 page_sz = GITS_PAGE_SIZE_4K;
847 page_sz = GITS_PAGE_SIZE_16K;
852 page_sz = GITS_PAGE_SIZE_64K;
856 g_assert_not_reached();
859 num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;
/* idbits caps the table: DEVBITS for the device table, CIDBITS/16 for collections */
861 type = FIELD_EX64(value, GITS_BASER, TYPE);
864 case GITS_BASER_TYPE_DEVICE:
866 idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
868 case GITS_BASER_TYPE_COLLECTION:
870 if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
871 idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
873 /* 16-bit CollectionId supported when CIL == 0 */
879 * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
880 * ensures we will only see type values corresponding to
881 * the values set up in gicv3_its_reset().
883 g_assert_not_reached();
886 memset(td, 0, sizeof(*td));
887 td->valid = FIELD_EX64(value, GITS_BASER, VALID);
889 * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
890 * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
891 * do not have a special case where the GITS_BASER<n>.Valid bit is 0
892 * for the register corresponding to the Collection table but we
893 * still have to process interrupts using non-memory-backed
894 * Collection table entries.)
899 td->page_sz = page_sz;
900 td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
901 td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
902 td->base_addr = baser_base_addr(value, page_sz);
904 td->num_entries = (num_pages * page_sz) / td->entry_sz;
906 td->num_entries = (((num_pages * page_sz) /
907 L1TABLE_ENTRY_SIZE) *
908 (page_sz / td->entry_sz));
910 td->num_entries = MIN(td->num_entries, 1ULL << idbits);
/*
 * Decode GITS_CBASER into the cached command-queue descriptor: valid
 * flag, entry count (SIZE is in 4K pages) and physical base address.
 */
914 static void extract_cmdq_params(GICv3ITSState *s)
916 uint16_t num_pages = 0;
917 uint64_t value = s->cbaser;
919 num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
921 memset(&s->cq, 0 , sizeof(s->cq));
922 s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
925 s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
926 GITS_CMDQ_ENTRY_SIZE;
927 s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
928 s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
/* Reads of the translation frame are RES0 (GITS_TRANSLATER is write-only). */
932 static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
933 uint64_t *data, unsigned size,
937 * GITS_TRANSLATER is write-only, and all other addresses
938 * in the interrupt translation space frame are RES0.
/*
 * Handle writes to the translation frame. A GITS_TRANSLATER write while
 * the ITS is enabled triggers an interrupt translation, using the bus
 * requester ID as the devid and the written data as the eventid.
 */
944 static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
945 uint64_t data, unsigned size,
948 GICv3ITSState *s = (GICv3ITSState *)opaque;
951 trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
954 case GITS_TRANSLATER:
955 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
956 result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
/*
 * 32-bit register writes to the ITS control frame. Enabling the ITS via
 * GITS_CTLR re-extracts table and command-queue parameters; CBASER and
 * BASERn are writable only while the ITS is disabled (IMPDEF choice);
 * CREADR is writable only when GICD_CTLR.DS is set.
 * Returns false for offsets not handled here (treated as RAZ/WI by the
 * caller, which logs a guest error).
 */
970 static bool its_writel(GICv3ITSState *s, hwaddr offset,
971 uint64_t value, MemTxAttrs attrs)
978 if (value & R_GITS_CTLR_ENABLED_MASK) {
979 s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
980 extract_table_params(s);
981 extract_cmdq_params(s);
984 s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
989 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
992 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
993 s->cbaser = deposit64(s->cbaser, 0, 32, value);
997 case GITS_CBASER + 4:
999 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1002 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1003 s->cbaser = deposit64(s->cbaser, 32, 32, value);
/* A CWRITER update that moves past CREADR kicks off command processing */
1008 s->cwriter = deposit64(s->cwriter, 0, 32,
1009 (value & ~R_GITS_CWRITER_RETRY_MASK));
1010 if (s->cwriter != s->creadr) {
1014 case GITS_CWRITER + 4:
1015 s->cwriter = deposit64(s->cwriter, 32, 32, value);
1018 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1019 s->creadr = deposit64(s->creadr, 0, 32,
1020 (value & ~R_GITS_CREADR_STALLED_MASK));
1022 /* RO register, ignore the write */
1023 qemu_log_mask(LOG_GUEST_ERROR,
1024 "%s: invalid guest write to RO register at offset "
1025 TARGET_FMT_plx "\n", __func__, offset);
1028 case GITS_CREADR + 4:
1029 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1030 s->creadr = deposit64(s->creadr, 32, 32, value);
1032 /* RO register, ignore the write */
1033 qemu_log_mask(LOG_GUEST_ERROR,
1034 "%s: invalid guest write to RO register at offset "
1035 TARGET_FMT_plx "\n", __func__, offset);
1038 case GITS_BASER ... GITS_BASER + 0x3f:
1040 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1043 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1044 index = (offset - GITS_BASER) / 8;
1046 if (s->baser[index] == 0) {
1047 /* Unimplemented GITS_BASERn: RAZ/WI */
/* Update one 32-bit half of BASERn, preserving the RO fields */
1052 value &= ~GITS_BASER_RO_MASK;
1053 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
1054 s->baser[index] |= value;
1056 value &= ~GITS_BASER_RO_MASK;
1057 s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
1058 s->baser[index] |= value;
1063 case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1064 /* RO registers, ignore the write */
1065 qemu_log_mask(LOG_GUEST_ERROR,
1066 "%s: invalid guest write to RO register at offset "
1067 TARGET_FMT_plx "\n", __func__, offset);
/*
 * 32-bit register reads from the ITS control frame: return the
 * requested half of the 64-bit register state (IIDR, ID registers,
 * TYPER, CBASER, CREADR, CWRITER, BASERn). Returns false for offsets
 * not handled here (RAZ/WI, logged by the caller).
 */
1076 static bool its_readl(GICv3ITSState *s, hwaddr offset,
1077 uint64_t *data, MemTxAttrs attrs)
1087 *data = gicv3_iidr();
1089 case GITS_IDREGS ... GITS_IDREGS + 0x2f:
1091 *data = gicv3_idreg(offset - GITS_IDREGS);
1094 *data = extract64(s->typer, 0, 32);
1096 case GITS_TYPER + 4:
1097 *data = extract64(s->typer, 32, 32);
1100 *data = extract64(s->cbaser, 0, 32);
1102 case GITS_CBASER + 4:
1103 *data = extract64(s->cbaser, 32, 32);
1106 *data = extract64(s->creadr, 0, 32);
1108 case GITS_CREADR + 4:
1109 *data = extract64(s->creadr, 32, 32);
1112 *data = extract64(s->cwriter, 0, 32);
1114 case GITS_CWRITER + 4:
1115 *data = extract64(s->cwriter, 32, 32);
1117 case GITS_BASER ... GITS_BASER + 0x3f:
1118 index = (offset - GITS_BASER) / 8;
1120 *data = extract64(s->baser[index], 32, 32);
1122 *data = extract64(s->baser[index], 0, 32);
/*
 * 64-bit register writes to the ITS control frame; same write rules as
 * its_writel() (BASERn/CBASER RO while enabled, CREADR gated on
 * GICD_CTLR.DS) but operating on whole registers at once.
 */
1132 static bool its_writell(GICv3ITSState *s, hwaddr offset,
1133 uint64_t value, MemTxAttrs attrs)
1139 case GITS_BASER ... GITS_BASER + 0x3f:
1141 * IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
1144 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1145 index = (offset - GITS_BASER) / 8;
1146 if (s->baser[index] == 0) {
1147 /* Unimplemented GITS_BASERn: RAZ/WI */
1150 s->baser[index] &= GITS_BASER_RO_MASK;
1151 s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
1156 * IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
1159 if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
1165 s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
1166 if (s->cwriter != s->creadr) {
1171 if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
1172 s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
1174 /* RO register, ignore the write */
1175 qemu_log_mask(LOG_GUEST_ERROR,
1176 "%s: invalid guest write to RO register at offset "
1177 TARGET_FMT_plx "\n", __func__, offset);
1181 /* RO registers, ignore the write */
1182 qemu_log_mask(LOG_GUEST_ERROR,
1183 "%s: invalid guest write to RO register at offset "
1184 TARGET_FMT_plx "\n", __func__, offset);
/*
 * 64-bit register reads from the ITS control frame. Returns false for
 * offsets not handled here (RAZ/WI, logged by the caller).
 */
1193 static bool its_readll(GICv3ITSState *s, hwaddr offset,
1194 uint64_t *data, MemTxAttrs attrs)
1203 case GITS_BASER ... GITS_BASER + 0x3f:
1204 index = (offset - GITS_BASER) / 8;
1205 *data = s->baser[index];
/*
 * MMIO read dispatcher for the control frame: route by access size to
 * its_readl()/its_readll(). Unhandled offsets log a guest error and
 * read as zero rather than faulting.
 */
1223 static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
1224 unsigned size, MemTxAttrs attrs)
1226 GICv3ITSState *s = (GICv3ITSState *)opaque;
1231 result = its_readl(s, offset, data, attrs);
1234 result = its_readll(s, offset, data, attrs);
1242 qemu_log_mask(LOG_GUEST_ERROR,
1243 "%s: invalid guest read at offset " TARGET_FMT_plx
1244 "size %u\n", __func__, offset, size);
1245 trace_gicv3_its_badread(offset, size);
1247 * The spec requires that reserved registers are RAZ/WI;
1248 * so use false returns from leaf functions as a way to
1249 * trigger the guest-error logging but don't return it to
1250 * the caller, or we'll cause a spurious guest data abort.
1254 trace_gicv3_its_read(offset, *data, size);
/*
 * MMIO write dispatcher for the control frame: route by access size to
 * its_writel()/its_writell(). Unhandled offsets log a guest error and
 * are ignored (WI) rather than faulting.
 */
1259 static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
1260 unsigned size, MemTxAttrs attrs)
1262 GICv3ITSState *s = (GICv3ITSState *)opaque;
1267 result = its_writel(s, offset, data, attrs);
1270 result = its_writell(s, offset, data, attrs);
1278 qemu_log_mask(LOG_GUEST_ERROR,
1279 "%s: invalid guest write at offset " TARGET_FMT_plx
1280 "size %u\n", __func__, offset, size);
1281 trace_gicv3_its_badwrite(offset, data, size);
1283 * The spec requires that reserved registers are RAZ/WI;
1284 * so use false returns from leaf functions as a way to
1285 * trigger the guest-error logging but don't return it to
1286 * the caller, or we'll cause a spurious guest data abort.
1289 trace_gicv3_its_write(offset, data, size);
/* MMIO ops for the ITS control register frame: 32- and 64-bit accesses. */
1294 static const MemoryRegionOps gicv3_its_control_ops = {
1295 .read_with_attrs = gicv3_its_read,
1296 .write_with_attrs = gicv3_its_write,
1297 .valid.min_access_size = 4,
1298 .valid.max_access_size = 8,
1299 .impl.min_access_size = 4,
1300 .impl.max_access_size = 8,
1301 .endianness = DEVICE_NATIVE_ENDIAN,
/* MMIO ops for the translation frame (GITS_TRANSLATER): 16/32-bit accesses. */
1304 static const MemoryRegionOps gicv3_its_translation_ops = {
1305 .read_with_attrs = gicv3_its_translation_read,
1306 .write_with_attrs = gicv3_its_translation_write,
1307 .valid.min_access_size = 2,
1308 .valid.max_access_size = 4,
1309 .impl.min_access_size = 2,
1310 .impl.max_access_size = 4,
1311 .endianness = DEVICE_NATIVE_ENDIAN,
/*
 * Realize: verify every redistributor advertises physical LPI support
 * (GICR_TYPER.PLPIS), set up the two MMIO regions, and populate the
 * static GITS_TYPER feature fields.
 */
1314 static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
1316 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1319 for (i = 0; i < s->gicv3->num_cpu; i++) {
1320 if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
1321 error_setg(errp, "Physical LPI not supported by CPU %d", i);
1326 gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
1328 /* set the ITS default features supported */
1329 s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
1330 s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
1331 ITS_ITT_ENTRY_SIZE - 1);
1332 s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
1333 s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
1334 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
1335 s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
/*
 * Device reset: chain to the parent reset, then set the fixed reset
 * values -- GITS_CTLR.Quiescent = 1 and the read-only TYPE/PAGESIZE/
 * ENTRYSIZE fields of GITS_BASER0 (device table) and GITS_BASER1
 * (collection table).
 */
1338 static void gicv3_its_reset(DeviceState *dev)
1340 GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
1341 GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
1343 c->parent_reset(dev);
1345 /* Quiescent bit reset to 1 */
1346 s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);
1349 * setting GITS_BASER0.Type = 0b001 (Device)
1350 * GITS_BASER1.Type = 0b100 (Collection Table)
1351 * GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
1352 * GITS_BASER<0,1>.Page_Size = 64KB
1353 * and default translation table entry size to 16 bytes
1355 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
1356 GITS_BASER_TYPE_DEVICE);
1357 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
1358 GITS_BASER_PAGESIZE_64K);
1359 s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
1362 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
1363 GITS_BASER_TYPE_COLLECTION);
1364 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
1365 GITS_BASER_PAGESIZE_64K);
1366 s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
/*
 * Migration post-load hook: if the ITS was enabled on the source,
 * rebuild the cached table and command-queue parameters from the
 * migrated register state.
 */
1370 static void gicv3_its_post_load(GICv3ITSState *s)
1372 if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
1373 extract_table_params(s);
1374 extract_cmdq_params(s);
/* QOM property: link to the parent GICv3 device this ITS belongs to. */
1378 static Property gicv3_its_props[] = {
1379 DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
1381 DEFINE_PROP_END_OF_LIST(),
/*
 * Class init: install realize, properties, the chained reset handler
 * (parent handler saved in GICv3ITSClass.parent_reset) and the
 * migration post_load hook.
 */
1384 static void gicv3_its_class_init(ObjectClass *klass, void *data)
1386 DeviceClass *dc = DEVICE_CLASS(klass);
1387 GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
1388 GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);
1390 dc->realize = gicv3_arm_its_realize;
1391 device_class_set_props(dc, gicv3_its_props);
1392 device_class_set_parent_reset(dc, gicv3_its_reset, &ic->parent_reset);
1393 icc->post_load = gicv3_its_post_load;
/* QOM type registration record for TYPE_ARM_GICV3_ITS. */
1396 static const TypeInfo gicv3_its_info = {
1397 .name = TYPE_ARM_GICV3_ITS,
1398 .parent = TYPE_ARM_GICV3_ITS_COMMON,
1399 .instance_size = sizeof(GICv3ITSState),
1400 .class_init = gicv3_its_class_init,
1401 .class_size = sizeof(GICv3ITSClass),
/* Register the ITS type with QOM at module init time. */
1404 static void gicv3_its_register_types(void)
1406 type_register_static(&gicv3_its_info);
1409 type_init(gicv3_its_register_types)