#include "qemu/osdep.h"
#include "qemu/log.h"
+#include "trace.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
INTERRUPT = 3,
} ItsCmdType;
-typedef struct {
- uint32_t iteh;
- uint64_t itel;
-} IteEntry;
+typedef struct DTEntry {
+ bool valid;
+ unsigned size;
+ uint64_t ittaddr;
+} DTEntry;
+
+typedef struct CTEntry {
+ bool valid;
+ uint32_t rdbase;
+} CTEntry;
+
+typedef struct ITEntry {
+ bool valid;
+ int inttype;
+ uint32_t intid;
+ uint32_t doorbell;
+ uint32_t icid;
+ uint32_t vpeid;
+} ITEntry;
+
+typedef struct VTEntry {
+ bool valid;
+ unsigned vptsize;
+ uint32_t rdbase;
+ uint64_t vptaddr;
+} VTEntry;
+
+/*
+ * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
+ * if a command parameter is not correct. These include both "stall
+ * processing of the command queue" and "ignore this command, and
+ * keep processing the queue". In our implementation we choose that
+ * memory transaction errors reading the command packet provoke a
+ * stall, but errors in parameters cause us to ignore the command
+ * and continue processing.
+ * The process_* functions which handle individual ITS commands all
+ * return an ItsCmdResult which tells process_cmdq() whether it should
+ * stall or keep going.
+ */
+typedef enum ItsCmdResult {
+ CMD_STALL = 0,
+ CMD_CONTINUE = 1,
+} ItsCmdResult;
+
+/* True if the ITS supports the GICv4 virtual LPI feature */
+static bool its_feature_virtual(GICv3ITSState *s)
+{
+ return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
+}
+
+static inline bool intid_in_lpi_range(uint32_t id)
+{
+ return id >= GICV3_LPI_INTID_START &&
+ id < (1 << (GICD_TYPER_IDBITS + 1));
+}
+
+static inline bool valid_doorbell(uint32_t id)
+{
+ /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
+ return id == INTID_SPURIOUS || intid_in_lpi_range(id);
+}
static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
return result;
}
-static bool get_cte(GICv3ITSState *s, uint16_t icid, uint64_t *cte,
- MemTxResult *res)
+static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
+ uint32_t idx, MemTxResult *res)
{
+ /*
+ * Given a TableDesc describing one of the ITS in-guest-memory
+ * tables and an index into it, return the guest address
+ * corresponding to that table entry.
+ * If there was a memory error reading the L1 table of an
+ * indirect table, *res is set accordingly, and we return -1.
+ * If the L1 table entry is marked not valid, we return -1 with
+ * *res set to MEMTX_OK.
+ *
+ * The specification defines the format of level 1 entries of a
+ * 2-level table, but the format of level 2 entries and the format
+ * of flat-mapped tables is IMPDEF.
+ */
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t l2t_addr;
- uint64_t value;
- bool valid_l2t;
- uint32_t l2t_id;
+ uint32_t l2idx;
+ uint64_t l2;
uint32_t num_l2_entries;
- if (s->ct.indirect) {
- l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
-
- value = address_space_ldq_le(as,
- s->ct.base_addr +
- (l2t_id * L1TABLE_ENTRY_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
-
- if (*res == MEMTX_OK) {
- valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+ *res = MEMTX_OK;
- if (valid_l2t) {
- num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+ if (!td->indirect) {
+ /* Single level table */
+ return td->base_addr + idx * td->entry_sz;
+ }
- l2t_addr = value & ((1ULL << 51) - 1);
+ /* Two level table */
+ l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
- *cte = address_space_ldq_le(as, l2t_addr +
- ((icid % num_l2_entries) * GITS_CTE_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
- }
- }
- } else {
- /* Flat level table */
- *cte = address_space_ldq_le(as, s->ct.base_addr +
- (icid * GITS_CTE_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
+ l2 = address_space_ldq_le(as,
+ td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
+ MEMTXATTRS_UNSPECIFIED, res);
+ if (*res != MEMTX_OK) {
+ return -1;
+ }
+ if (!(l2 & L2_TABLE_VALID_MASK)) {
+ return -1;
}
- return FIELD_EX64(*cte, CTE, VALID);
+ num_l2_entries = td->page_sz / td->entry_sz;
+ return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}
-static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
- IteEntry ite)
+/*
+ * Read the Collection Table entry at index @icid. On success (including
+ * successfully determining that there is no valid CTE for this index),
+ * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
+ * If there is an error reading memory then we return the error code.
+ */
+static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
{
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t itt_addr;
MemTxResult res = MEMTX_OK;
+ uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
+ uint64_t cteval;
- itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
- itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
-
- address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
- sizeof(uint32_t))), ite.itel, MEMTXATTRS_UNSPECIFIED,
- &res);
+ if (entry_addr == -1) {
+ /* No L2 table entry, i.e. no valid CTE, or a memory error */
+ cte->valid = false;
+ goto out;
+ }
- if (res == MEMTX_OK) {
- address_space_stl_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
- sizeof(uint32_t))) + sizeof(uint32_t), ite.iteh,
- MEMTXATTRS_UNSPECIFIED, &res);
+ cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ goto out;
}
+ cte->valid = FIELD_EX64(cteval, CTE, VALID);
+ cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
+out:
if (res != MEMTX_OK) {
- return false;
+ trace_gicv3_its_cte_read_fault(icid);
} else {
- return true;
+ trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
}
+ return res;
}
-static bool get_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
- uint16_t *icid, uint32_t *pIntid, MemTxResult *res)
+/*
+ * Update the Interrupt Table entry at index @eventid in the table specified
+ * by the dte @dte. Returns true on success, false if there was a memory
+ * access error.
+ */
+static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
+ const ITEntry *ite)
{
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t itt_addr;
- bool status = false;
- IteEntry ite = {};
-
- itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
- itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
-
- ite.itel = address_space_ldq_le(as, itt_addr +
- (eventid * (sizeof(uint64_t) +
- sizeof(uint32_t))), MEMTXATTRS_UNSPECIFIED,
- res);
-
- if (*res == MEMTX_OK) {
- ite.iteh = address_space_ldl_le(as, itt_addr +
- (eventid * (sizeof(uint64_t) +
- sizeof(uint32_t))) + sizeof(uint32_t),
- MEMTXATTRS_UNSPECIFIED, res);
-
- if (*res == MEMTX_OK) {
- if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
- int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
- if (inttype == ITE_INTTYPE_PHYSICAL) {
- *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
- *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
- status = true;
- }
- }
- }
+ MemTxResult res = MEMTX_OK;
+ hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
+ uint64_t itel = 0;
+ uint32_t iteh = 0;
+
+ trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
+ ite->inttype, ite->intid, ite->icid,
+ ite->vpeid, ite->doorbell);
+
+ if (ite->valid) {
+ itel = FIELD_DP64(itel, ITE_L, VALID, 1);
+ itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
+ itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
+ itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
+ itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
+ iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
}
- return status;
+
+ address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ return false;
+ }
+ address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
+ return res == MEMTX_OK;
}
-static uint64_t get_dte(GICv3ITSState *s, uint32_t devid, MemTxResult *res)
+/*
+ * Read the Interrupt Table entry at index @eventid from the table specified
+ * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
+ * struct @ite accordingly. If there is an error reading memory then we return
+ * the error code.
+ */
+static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
+ const DTEntry *dte, ITEntry *ite)
{
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t l2t_addr;
- uint64_t value;
- bool valid_l2t;
- uint32_t l2t_id;
- uint32_t num_l2_entries;
-
- if (s->dt.indirect) {
- l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
+ MemTxResult res = MEMTX_OK;
+ uint64_t itel;
+ uint32_t iteh;
+ hwaddr iteaddr = dte->ittaddr + eventid * ITS_ITT_ENTRY_SIZE;
- value = address_space_ldq_le(as,
- s->dt.base_addr +
- (l2t_id * L1TABLE_ENTRY_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
+ itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
+ return res;
+ }
- if (*res == MEMTX_OK) {
- valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+ iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
+ return res;
+ }
- if (valid_l2t) {
- num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+ ite->valid = FIELD_EX64(itel, ITE_L, VALID);
+ ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
+ ite->intid = FIELD_EX64(itel, ITE_L, INTID);
+ ite->icid = FIELD_EX64(itel, ITE_L, ICID);
+ ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
+ ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
+ trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
+ ite->inttype, ite->intid, ite->icid,
+ ite->vpeid, ite->doorbell);
+ return MEMTX_OK;
+}
- l2t_addr = value & ((1ULL << 51) - 1);
+/*
+ * Read the Device Table entry at index @devid. On success (including
+ * successfully determining that there is no valid DTE for this index),
+ * we return MEMTX_OK and populate the DTEntry struct accordingly.
+ * If there is an error reading memory then we return the error code.
+ */
+static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
+{
+ MemTxResult res = MEMTX_OK;
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
+ uint64_t dteval;
- value = address_space_ldq_le(as, l2t_addr +
- ((devid % num_l2_entries) * GITS_DTE_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
- }
- }
+ if (entry_addr == -1) {
+ /* No L2 table entry, i.e. no valid DTE, or a memory error */
+ dte->valid = false;
+ goto out;
+ }
+ dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
+ if (res != MEMTX_OK) {
+ goto out;
+ }
+ dte->valid = FIELD_EX64(dteval, DTE, VALID);
+ dte->size = FIELD_EX64(dteval, DTE, SIZE);
+ /* DTE word field stores bits [51:8] of the ITT address */
+ dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
+out:
+ if (res != MEMTX_OK) {
+ trace_gicv3_its_dte_read_fault(devid);
} else {
- /* Flat level table */
- value = address_space_ldq_le(as, s->dt.base_addr +
- (devid * GITS_DTE_SIZE),
- MEMTXATTRS_UNSPECIFIED, res);
+ trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
}
-
- return value;
+ return res;
}
/*
* 3. handling of ITS CLEAR command
* 4. handling of ITS DISCARD command
*/
-static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset,
- ItsCmdType cmd)
+static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
+ uint32_t eventid, ItsCmdType cmd)
{
- AddressSpace *as = &s->gicv3->dma_as;
- uint32_t devid, eventid;
- MemTxResult res = MEMTX_OK;
- bool dte_valid;
- uint64_t dte = 0;
- uint32_t max_eventid;
- uint16_t icid = 0;
- uint32_t pIntid = 0;
- bool ite_valid = false;
- uint64_t cte = 0;
- bool cte_valid = false;
- bool result = false;
- uint64_t rdbase;
-
- if (cmd == NONE) {
- devid = offset;
- } else {
- devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+ uint64_t num_eventids;
+ DTEntry dte;
+ CTEntry cte;
+ ITEntry ite;
- offset += NUM_BYTES_IN_DW;
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
- MEMTXATTRS_UNSPECIFIED, &res);
+ if (devid >= s->dt.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: devid %d>=%d",
+ __func__, devid, s->dt.num_entries);
+ return CMD_CONTINUE;
}
- if (res != MEMTX_OK) {
- return result;
+ if (get_dte(s, devid, &dte) != MEMTX_OK) {
+ return CMD_STALL;
}
-
- eventid = (value & EVENTID_MASK);
-
- dte = get_dte(s, devid, &res);
-
- if (res != MEMTX_OK) {
- return result;
+ if (!dte.valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: "
+ "invalid dte for %d\n", __func__, devid);
+ return CMD_CONTINUE;
}
- dte_valid = FIELD_EX64(dte, DTE, VALID);
-
- if (dte_valid) {
- max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1);
- ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
+ num_eventids = 1ULL << (dte.size + 1);
+ if (eventid >= num_eventids) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: eventid %d >= %"
+ PRId64 "\n",
+ __func__, eventid, num_eventids);
+ return CMD_CONTINUE;
+ }
- if (res != MEMTX_OK) {
- return result;
- }
+ if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) {
+ return CMD_STALL;
+ }
- if (ite_valid) {
- cte_valid = get_cte(s, icid, &cte, &res);
- }
+ if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: invalid ITE\n",
+ __func__);
+ return CMD_CONTINUE;
+ }
- if (res != MEMTX_OK) {
- return result;
- }
- } else {
+ if (ite.icid >= s->ct.num_entries) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: "
- "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n",
- __func__, dte, devid, res);
- return result;
+ "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
+ __func__, ite.icid);
+ return CMD_CONTINUE;
}
+ if (get_cte(s, ite.icid, &cte) != MEMTX_OK) {
+ return CMD_STALL;
+ }
+ if (!cte.valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: invalid CTE\n",
+ __func__);
+ return CMD_CONTINUE;
+ }
/*
- * In this implementation, in case of guest errors we ignore the
- * command and move onto the next command in the queue.
+ * Current implementation only supports rdbase == procnum
+ * Hence rdbase physical address is ignored
*/
- if (devid >= s->dt.num_ids) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: devid %d>=%d",
- __func__, devid, s->dt.num_ids);
+ if (cte.rdbase >= s->gicv3->num_cpu) {
+ return CMD_CONTINUE;
+ }
- } else if (!dte_valid || !ite_valid || !cte_valid) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: "
- "dte: %s, ite: %s, cte: %s\n",
- __func__,
- dte_valid ? "valid" : "invalid",
- ite_valid ? "valid" : "invalid",
- cte_valid ? "valid" : "invalid");
- } else if (eventid > max_eventid) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: eventid %d > %d\n",
- __func__, eventid, max_eventid);
+ if ((cmd == CLEAR) || (cmd == DISCARD)) {
+ gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0);
} else {
- /*
- * Current implementation only supports rdbase == procnum
- * Hence rdbase physical address is ignored
- */
- rdbase = FIELD_EX64(cte, CTE, RDBASE);
+ gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1);
+ }
- if (rdbase >= s->gicv3->num_cpu) {
- return result;
- }
+ if (cmd == DISCARD) {
+ ITEntry ite = {};
+ /* remove mapping from interrupt translation table */
+ ite.valid = false;
+ return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
+ }
+ return CMD_CONTINUE;
+}
- if ((cmd == CLEAR) || (cmd == DISCARD)) {
- gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 0);
- } else {
- gicv3_redist_process_lpi(&s->gicv3->cpu[rdbase], pIntid, 1);
- }
+static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
+ ItsCmdType cmd)
+{
+ uint32_t devid, eventid;
- if (cmd == DISCARD) {
- IteEntry ite = {};
- /* remove mapping from interrupt translation table */
- result = update_ite(s, eventid, dte, ite);
- }
+ devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
+ eventid = cmdpkt[1] & EVENTID_MASK;
+ switch (cmd) {
+ case INTERRUPT:
+ trace_gicv3_its_cmd_int(devid, eventid);
+ break;
+ case CLEAR:
+ trace_gicv3_its_cmd_clear(devid, eventid);
+ break;
+ case DISCARD:
+ trace_gicv3_its_cmd_discard(devid, eventid);
+ break;
+ default:
+ g_assert_not_reached();
}
-
- return result;
+ return do_process_its_cmd(s, devid, eventid, cmd);
}
-static bool process_mapti(GICv3ITSState *s, uint64_t value, uint32_t offset,
- bool ignore_pInt)
+static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
+ bool ignore_pInt)
{
- AddressSpace *as = &s->gicv3->dma_as;
uint32_t devid, eventid;
uint32_t pIntid = 0;
- uint32_t max_eventid, max_Intid;
- bool dte_valid;
- MemTxResult res = MEMTX_OK;
+ uint64_t num_eventids;
uint16_t icid = 0;
- uint64_t dte = 0;
- bool result = false;
+ DTEntry dte;
+ ITEntry ite;
- devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
- offset += NUM_BYTES_IN_DW;
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
- MEMTXATTRS_UNSPECIFIED, &res);
-
- if (res != MEMTX_OK) {
- return result;
- }
-
- eventid = (value & EVENTID_MASK);
+ devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
+ eventid = cmdpkt[1] & EVENTID_MASK;
+ icid = cmdpkt[2] & ICID_MASK;
if (ignore_pInt) {
pIntid = eventid;
+ trace_gicv3_its_cmd_mapi(devid, eventid, icid);
} else {
- pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
+ pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
+ trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
}
- offset += NUM_BYTES_IN_DW;
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
- MEMTXATTRS_UNSPECIFIED, &res);
+ if (devid >= s->dt.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: devid %d>=%d",
+ __func__, devid, s->dt.num_entries);
+ return CMD_CONTINUE;
+ }
- if (res != MEMTX_OK) {
- return result;
+ if (get_dte(s, devid, &dte) != MEMTX_OK) {
+ return CMD_STALL;
}
+ num_eventids = 1ULL << (dte.size + 1);
- icid = value & ICID_MASK;
+ if (icid >= s->ct.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid ICID 0x%x >= 0x%x\n",
+ __func__, icid, s->ct.num_entries);
+ return CMD_CONTINUE;
+ }
- dte = get_dte(s, devid, &res);
+ if (!dte.valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: no valid DTE for devid 0x%x\n", __func__, devid);
+ return CMD_CONTINUE;
+ }
- if (res != MEMTX_OK) {
- return result;
+ if (eventid >= num_eventids) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
+ __func__, eventid, num_eventids);
+ return CMD_CONTINUE;
}
- dte_valid = FIELD_EX64(dte, DTE, VALID);
- max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1);
- max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
- if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
- || !dte_valid || (eventid > max_eventid) ||
- (((pIntid < GICV3_LPI_INTID_START) || (pIntid > max_Intid)) &&
- (pIntid != INTID_SPURIOUS))) {
+ if (!intid_in_lpi_range(pIntid)) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes "
- "devid %d or icid %d or eventid %d or pIntid %d or"
- "unmapped dte %d\n", __func__, devid, icid, eventid,
- pIntid, dte_valid);
- /*
- * in this implementation, in case of error
- * we ignore this command and move onto the next
- * command in the queue
- */
+ "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
+ return CMD_CONTINUE;
+ }
+
+ /* add ite entry to interrupt translation table */
+ ite.valid = true;
+ ite.inttype = ITE_INTTYPE_PHYSICAL;
+ ite.intid = pIntid;
+ ite.icid = icid;
+ ite.doorbell = INTID_SPURIOUS;
+ ite.vpeid = 0;
+ return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
+}
+
+static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
+ bool ignore_vintid)
+{
+ uint32_t devid, eventid, vintid, doorbell, vpeid;
+ uint32_t num_eventids;
+ DTEntry dte;
+ ITEntry ite;
+
+ if (!its_feature_virtual(s)) {
+ return CMD_CONTINUE;
+ }
+
+ devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
+ eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
+ vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
+ doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);
+ if (ignore_vintid) {
+ vintid = eventid;
+ trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
} else {
- /* add ite entry to interrupt translation table */
- IteEntry ite = {};
- ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
- ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
- ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
- ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
+ vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
+ trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
+ }
+
+ if (devid >= s->dt.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
+ __func__, devid, s->dt.num_entries);
+ return CMD_CONTINUE;
+ }
- result = update_ite(s, eventid, dte, ite);
+ if (get_dte(s, devid, &dte) != MEMTX_OK) {
+ return CMD_STALL;
}
- return result;
+ if (!dte.valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: no entry in device table for DeviceID 0x%x\n",
+ __func__, devid);
+ return CMD_CONTINUE;
+ }
+
+ num_eventids = 1ULL << (dte.size + 1);
+
+ if (eventid >= num_eventids) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: EventID 0x%x too large for DeviceID 0x%x "
+ "(must be less than 0x%x)\n",
+ __func__, eventid, devid, num_eventids);
+ return CMD_CONTINUE;
+ }
+ if (!intid_in_lpi_range(vintid)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: VIntID 0x%x not a valid LPI\n",
+ __func__, vintid);
+ return CMD_CONTINUE;
+ }
+ if (!valid_doorbell(doorbell)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Doorbell %d not 1023 and not a valid LPI\n",
+ __func__, doorbell);
+ return CMD_CONTINUE;
+ }
+ if (vpeid >= s->vpet.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
+ __func__, vpeid, s->vpet.num_entries);
+ return CMD_CONTINUE;
+ }
+ /* add ite entry to interrupt translation table */
+ ite.valid = true;
+ ite.inttype = ITE_INTTYPE_VIRTUAL;
+ ite.intid = vintid;
+ ite.icid = 0;
+ ite.doorbell = doorbell;
+ ite.vpeid = vpeid;
+ return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL;
}
-static bool update_cte(GICv3ITSState *s, uint16_t icid, bool valid,
- uint64_t rdbase)
+/*
+ * Update the Collection Table entry for @icid to @cte. Returns true
+ * on success, false if there was a memory access error.
+ */
+static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
{
AddressSpace *as = &s->gicv3->dma_as;
- uint64_t value;
- uint64_t l2t_addr;
- bool valid_l2t;
- uint32_t l2t_id;
- uint32_t num_l2_entries;
- uint64_t cte = 0;
+ uint64_t entry_addr;
+ uint64_t cteval = 0;
MemTxResult res = MEMTX_OK;
- if (!s->ct.valid) {
- return true;
- }
+ trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);
- if (valid) {
+ if (cte->valid) {
/* add mapping entry to collection table */
- cte = FIELD_DP64(cte, CTE, VALID, 1);
- cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
+ cteval = FIELD_DP64(cteval, CTE, VALID, 1);
+ cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
}
- /*
- * The specification defines the format of level 1 entries of a
- * 2-level table, but the format of level 2 entries and the format
- * of flat-mapped tables is IMPDEF.
- */
- if (s->ct.indirect) {
- l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
+ entry_addr = table_entry_addr(s, &s->ct, icid, &res);
+ if (res != MEMTX_OK) {
+ /* memory access error: stall */
+ return false;
+ }
+ if (entry_addr == -1) {
+ /* No L2 table for this index: discard write and continue */
+ return true;
+ }
- value = address_space_ldq_le(as,
- s->ct.base_addr +
- (l2t_id * L1TABLE_ENTRY_SIZE),
- MEMTXATTRS_UNSPECIFIED, &res);
+ address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
+ return res == MEMTX_OK;
+}
- if (res != MEMTX_OK) {
- return false;
- }
+static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
+{
+ uint16_t icid;
+ CTEntry cte;
- valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+ icid = cmdpkt[2] & ICID_MASK;
+ cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
+ if (cte.valid) {
+ cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
+ cte.rdbase &= RDBASE_PROCNUM_MASK;
+ } else {
+ cte.rdbase = 0;
+ }
+ trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);
- if (valid_l2t) {
- num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+ if (icid >= s->ct.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
+ return CMD_CONTINUE;
+ }
+ if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
+ return CMD_CONTINUE;
+ }
- l2t_addr = value & ((1ULL << 51) - 1);
+ return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL;
+}
- address_space_stq_le(as, l2t_addr +
- ((icid % num_l2_entries) * GITS_CTE_SIZE),
- cte, MEMTXATTRS_UNSPECIFIED, &res);
- }
- } else {
- /* Flat level table */
- address_space_stq_le(as, s->ct.base_addr + (icid * GITS_CTE_SIZE),
- cte, MEMTXATTRS_UNSPECIFIED, &res);
+/*
+ * Update the Device Table entry for @devid to @dte. Returns true
+ * on success, false if there was a memory access error.
+ */
+static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
+{
+ AddressSpace *as = &s->gicv3->dma_as;
+ uint64_t entry_addr;
+ uint64_t dteval = 0;
+ MemTxResult res = MEMTX_OK;
+
+ trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);
+
+ if (dte->valid) {
+ /* add mapping entry to device table */
+ dteval = FIELD_DP64(dteval, DTE, VALID, 1);
+ dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
+ dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
}
+
+ entry_addr = table_entry_addr(s, &s->dt, devid, &res);
if (res != MEMTX_OK) {
+ /* memory access error: stall */
return false;
- } else {
+ }
+ if (entry_addr == -1) {
+ /* No L2 table for this index: discard write and continue */
return true;
}
+ address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
+ return res == MEMTX_OK;
}
-static bool process_mapc(GICv3ITSState *s, uint32_t offset)
+static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
{
- AddressSpace *as = &s->gicv3->dma_as;
- uint16_t icid;
- uint64_t rdbase;
- bool valid;
- MemTxResult res = MEMTX_OK;
- bool result = false;
- uint64_t value;
+ uint32_t devid;
+ DTEntry dte;
- offset += NUM_BYTES_IN_DW;
- offset += NUM_BYTES_IN_DW;
+ devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
+ dte.size = cmdpkt[1] & SIZE_MASK;
+ dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
+ dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
- MEMTXATTRS_UNSPECIFIED, &res);
+ trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);
- if (res != MEMTX_OK) {
- return result;
+ if (devid >= s->dt.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
+ devid, s->dt.num_entries);
+ return CMD_CONTINUE;
}
- icid = value & ICID_MASK;
+ if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ITS MAPD: invalid size %d\n", dte.size);
+ return CMD_CONTINUE;
+ }
+
+ return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL;
+}
+
+static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
+{
+ uint64_t rd1, rd2;
- rdbase = (value & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
- rdbase &= RDBASE_PROCNUM_MASK;
+ rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
+ rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);
- valid = (value & CMD_FIELD_VALID_MASK);
+ trace_gicv3_its_cmd_movall(rd1, rd2);
- if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
+ if (rd1 >= s->gicv3->num_cpu) {
qemu_log_mask(LOG_GUEST_ERROR,
- "ITS MAPC: invalid collection table attributes "
- "icid %d rdbase %" PRIu64 "\n", icid, rdbase);
- /*
- * in this implementation, in case of error
- * we ignore this command and move onto the next
- * command in the queue
- */
- } else {
- result = update_cte(s, icid, valid, rdbase);
+ "%s: RDBASE1 %" PRId64
+ " out of range (must be less than %d)\n",
+ __func__, rd1, s->gicv3->num_cpu);
+ return CMD_CONTINUE;
+ }
+ if (rd2 >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: RDBASE2 %" PRId64
+ " out of range (must be less than %d)\n",
+ __func__, rd2, s->gicv3->num_cpu);
+ return CMD_CONTINUE;
}
- return result;
+ if (rd1 == rd2) {
+ /* Move to same target must succeed as a no-op */
+ return CMD_CONTINUE;
+ }
+
+ /* Move all pending LPIs from redistributor 1 to redistributor 2 */
+ gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);
+
+ return CMD_CONTINUE;
}
-static bool update_dte(GICv3ITSState *s, uint32_t devid, bool valid,
- uint8_t size, uint64_t itt_addr)
+static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
- AddressSpace *as = &s->gicv3->dma_as;
- uint64_t value;
- uint64_t l2t_addr;
- bool valid_l2t;
- uint32_t l2t_id;
- uint32_t num_l2_entries;
- uint64_t dte = 0;
- MemTxResult res = MEMTX_OK;
+ uint32_t devid, eventid;
+ uint16_t new_icid;
+ uint64_t num_eventids;
+ DTEntry dte;
+ CTEntry old_cte, new_cte;
+ ITEntry old_ite;
- if (s->dt.valid) {
- if (valid) {
- /* add mapping entry to device table */
- dte = FIELD_DP64(dte, DTE, VALID, 1);
- dte = FIELD_DP64(dte, DTE, SIZE, size);
- dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
- }
- } else {
- return true;
+ devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
+ eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
+ new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);
+
+ trace_gicv3_its_cmd_movi(devid, eventid, new_icid);
+
+ if (devid >= s->dt.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: devid %d>=%d",
+ __func__, devid, s->dt.num_entries);
+ return CMD_CONTINUE;
+ }
+ if (get_dte(s, devid, &dte) != MEMTX_OK) {
+ return CMD_STALL;
}
- /*
- * The specification defines the format of level 1 entries of a
- * 2-level table, but the format of level 2 entries and the format
- * of flat-mapped tables is IMPDEF.
- */
- if (s->dt.indirect) {
- l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
+ if (!dte.valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: "
+ "invalid dte for %d\n", __func__, devid);
+ return CMD_CONTINUE;
+ }
- value = address_space_ldq_le(as,
- s->dt.base_addr +
- (l2t_id * L1TABLE_ENTRY_SIZE),
- MEMTXATTRS_UNSPECIFIED, &res);
+ num_eventids = 1ULL << (dte.size + 1);
+ if (eventid >= num_eventids) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: eventid %d >= %"
+ PRId64 "\n",
+ __func__, eventid, num_eventids);
+ return CMD_CONTINUE;
+ }
- if (res != MEMTX_OK) {
- return false;
- }
+ if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) {
+ return CMD_STALL;
+ }
- valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
+ if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: invalid ITE\n",
+ __func__);
+ return CMD_CONTINUE;
+ }
- if (valid_l2t) {
- num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+ if (old_ite.icid >= s->ct.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid ICID 0x%x in ITE (table corrupted?)\n",
+ __func__, old_ite.icid);
+ return CMD_CONTINUE;
+ }
- l2t_addr = value & ((1ULL << 51) - 1);
+ if (new_icid >= s->ct.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: ICID 0x%x\n",
+ __func__, new_icid);
+ return CMD_CONTINUE;
+ }
- address_space_stq_le(as, l2t_addr +
- ((devid % num_l2_entries) * GITS_DTE_SIZE),
- dte, MEMTXATTRS_UNSPECIFIED, &res);
- }
- } else {
- /* Flat level table */
- address_space_stq_le(as, s->dt.base_addr + (devid * GITS_DTE_SIZE),
- dte, MEMTXATTRS_UNSPECIFIED, &res);
+ if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) {
+ return CMD_STALL;
}
- if (res != MEMTX_OK) {
- return false;
- } else {
- return true;
+ if (!old_cte.valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: "
+ "invalid CTE for old ICID 0x%x\n",
+ __func__, old_ite.icid);
+ return CMD_CONTINUE;
+ }
+
+ if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) {
+ return CMD_STALL;
+ }
+ if (!new_cte.valid) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid command attributes: "
+ "invalid CTE for new ICID 0x%x\n",
+ __func__, new_icid);
+ return CMD_CONTINUE;
+ }
+
+ if (old_cte.rdbase >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: CTE has invalid rdbase 0x%x\n",
+ __func__, old_cte.rdbase);
+ return CMD_CONTINUE;
+ }
+
+ if (new_cte.rdbase >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: CTE has invalid rdbase 0x%x\n",
+ __func__, new_cte.rdbase);
+ return CMD_CONTINUE;
}
+
+ if (old_cte.rdbase != new_cte.rdbase) {
+ /* Move the LPI from the old redistributor to the new one */
+ gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
+ &s->gicv3->cpu[new_cte.rdbase],
+ old_ite.intid);
+ }
+
+ /* Update the ICID field in the interrupt translation table entry */
+ old_ite.icid = new_icid;
+ return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL;
}
-static bool process_mapd(GICv3ITSState *s, uint64_t value, uint32_t offset)
+/*
+ * Update the vPE Table entry at index @vpeid with the entry @vte.
+ * Returns true on success, false if there was a memory access error.
+ */
+static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
{
AddressSpace *as = &s->gicv3->dma_as;
- uint32_t devid;
- uint8_t size;
- uint64_t itt_addr;
- bool valid;
+ uint64_t entry_addr;
+ uint64_t vteval = 0;
MemTxResult res = MEMTX_OK;
- bool result = false;
- devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
+ trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
+ vte->rdbase);
- offset += NUM_BYTES_IN_DW;
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
- MEMTXATTRS_UNSPECIFIED, &res);
+ /*
+ * An invalid VTE is written back as an all-zeroes entry: vteval
+ * stays 0 and only a valid entry has its fields packed in.
+ */
+ if (vte->valid) {
+ vteval = FIELD_DP64(vteval, VTE, VALID, 1);
+ vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
+ vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
+ vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
+ }
+ entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
if (res != MEMTX_OK) {
- return result;
+ return false;
}
+ if (entry_addr == -1) {
+ /* No L2 table for this index: discard write and continue */
+ return true;
+ }
+ address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
+ return res == MEMTX_OK;
+}
- size = (value & SIZE_MASK);
-
- offset += NUM_BYTES_IN_DW;
- value = address_space_ldq_le(as, s->cq.base_addr + offset,
- MEMTXATTRS_UNSPECIFIED, &res);
+/*
+ * Process a VMAPP command: map (or unmap, if V == 0) the vPE Table
+ * entry for @vpeid. Parameter errors make us ignore the command and
+ * return CMD_CONTINUE; a memory access error writing the entry
+ * returns CMD_STALL (see the ItsCmdResult comment above).
+ */
+static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
+{
+ VTEntry vte;
+ uint32_t vpeid;
- if (res != MEMTX_OK) {
- return result;
+ /* VMAPP is only valid when the ITS implements GICv4 virtual LPIs */
+ if (!its_feature_virtual(s)) {
+ return CMD_CONTINUE;
}
- itt_addr = (value & ITTADDR_MASK) >> ITTADDR_SHIFT;
+ vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
+ vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
+ vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
+ vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
+ vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);
- valid = (value & CMD_FIELD_VALID_MASK);
+ trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
+ vte.vptaddr, vte.vptsize);
- if ((devid >= s->dt.num_ids) ||
- (size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
+ /*
+ * For GICv4.0 the VPT_size field is only 5 bits, whereas we
+ * define our field macros to include the full GICv4.1 8 bits.
+ * The range check on VPT_size will catch the cases where
+ * the guest set the RES0-in-GICv4.0 bits [7:6].
+ */
+ if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
qemu_log_mask(LOG_GUEST_ERROR,
- "ITS MAPD: invalid device table attributes "
- "devid %d or size %d\n", devid, size);
- /*
- * in this implementation, in case of error
- * we ignore this command and move onto the next
- * command in the queue
- */
- } else {
- result = update_dte(s, devid, valid, size, itt_addr);
+ "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
+ return CMD_CONTINUE;
}
- return result;
+ /* rdbase only needs to be in range when the entry is being mapped */
+ if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
+ return CMD_CONTINUE;
+ }
+
+ if (vpeid >= s->vpet.num_entries) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
+ __func__, vpeid, s->vpet.num_entries);
+ return CMD_CONTINUE;
+ }
+
+ return update_vte(s, vpeid, &vte) ? CMD_CONTINUE : CMD_STALL;
}
/*
uint32_t wr_offset = 0;
uint32_t rd_offset = 0;
uint32_t cq_offset = 0;
- uint64_t data;
AddressSpace *as = &s->gicv3->dma_as;
- MemTxResult res = MEMTX_OK;
- bool result = true;
uint8_t cmd;
int i;
}
while (wr_offset != rd_offset) {
+ ItsCmdResult result = CMD_CONTINUE;
+ void *hostmem;
+ hwaddr buflen;
+ uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];
+
cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);
- data = address_space_ldq_le(as, s->cq.base_addr + cq_offset,
- MEMTXATTRS_UNSPECIFIED, &res);
- if (res != MEMTX_OK) {
- result = false;
+
+ buflen = GITS_CMDQ_ENTRY_SIZE;
+ hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
+ &buflen, false, MEMTXATTRS_UNSPECIFIED);
+ if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
+ if (hostmem) {
+ address_space_unmap(as, hostmem, buflen, false, 0);
+ }
+ s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: could not read command at 0x%" PRIx64 "\n",
+ __func__, s->cq.base_addr + cq_offset);
+ break;
}
- cmd = (data & CMD_MASK);
+ for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
+ cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
+ }
+ address_space_unmap(as, hostmem, buflen, false, 0);
+
+ cmd = cmdpkt[0] & CMD_MASK;
+
+ trace_gicv3_its_process_command(rd_offset, cmd);
switch (cmd) {
case GITS_CMD_INT:
- res = process_its_cmd(s, data, cq_offset, INTERRUPT);
+ result = process_its_cmd(s, cmdpkt, INTERRUPT);
break;
case GITS_CMD_CLEAR:
- res = process_its_cmd(s, data, cq_offset, CLEAR);
+ result = process_its_cmd(s, cmdpkt, CLEAR);
break;
case GITS_CMD_SYNC:
/*
* is already consistent by the time SYNC command is executed.
* Hence no further processing is required for SYNC command.
*/
+ trace_gicv3_its_cmd_sync();
break;
case GITS_CMD_MAPD:
- result = process_mapd(s, data, cq_offset);
+ result = process_mapd(s, cmdpkt);
break;
case GITS_CMD_MAPC:
- result = process_mapc(s, cq_offset);
+ result = process_mapc(s, cmdpkt);
break;
case GITS_CMD_MAPTI:
- result = process_mapti(s, data, cq_offset, false);
+ result = process_mapti(s, cmdpkt, false);
break;
case GITS_CMD_MAPI:
- result = process_mapti(s, data, cq_offset, true);
+ result = process_mapti(s, cmdpkt, true);
break;
case GITS_CMD_DISCARD:
- result = process_its_cmd(s, data, cq_offset, DISCARD);
+ result = process_its_cmd(s, cmdpkt, DISCARD);
break;
case GITS_CMD_INV:
case GITS_CMD_INVALL:
* need to trigger lpi priority re-calculation to be in
* sync with LPI config table or pending table changes.
*/
+ trace_gicv3_its_cmd_inv();
for (i = 0; i < s->gicv3->num_cpu; i++) {
gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
}
break;
+ case GITS_CMD_MOVI:
+ result = process_movi(s, cmdpkt);
+ break;
+ case GITS_CMD_MOVALL:
+ result = process_movall(s, cmdpkt);
+ break;
+ case GITS_CMD_VMAPTI:
+ result = process_vmapti(s, cmdpkt, false);
+ break;
+ case GITS_CMD_VMAPI:
+ result = process_vmapti(s, cmdpkt, true);
+ break;
+ case GITS_CMD_VMAPP:
+ result = process_vmapp(s, cmdpkt);
+ break;
default:
+ trace_gicv3_its_cmd_unknown(cmd);
break;
}
- if (result) {
+ if (result == CMD_CONTINUE) {
rd_offset++;
rd_offset %= s->cq.num_entries;
s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
} else {
- /*
- * in this implementation, in case of dma read/write error
- * we stall the command processing
- */
+ /* CMD_STALL */
s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: %x cmd processing failed\n", __func__, cmd);
+ "%s: 0x%x cmd processing failed, stalling\n",
+ __func__, cmd);
break;
}
}
idbits = 16;
}
break;
+ case GITS_BASER_TYPE_VPE:
+ td = &s->vpet;
+ /*
+ * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
+ * implementation to implement fewer bits and report this
+ * via GICD_TYPER2.)
+ */
+ idbits = 16;
+ break;
default:
/*
* GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
}
memset(td, 0, sizeof(*td));
- td->valid = FIELD_EX64(value, GITS_BASER, VALID);
/*
* If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
* interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
* for the register corresponding to the Collection table but we
* still have to process interrupts using non-memory-backed
* Collection table entries.)
+ * The specification makes it UNPREDICTABLE to enable the ITS without
+ * marking each BASER<n> as valid. We choose to handle these as if
+ * the table was zero-sized, so commands using the table will fail
+ * and interrupts requested via GITS_TRANSLATER writes will be ignored.
+ * This happens automatically by leaving the num_entries field at
+ * zero, which will be caught by the bounds checks we have before
+ * every table lookup anyway.
*/
- if (!td->valid) {
+ if (!FIELD_EX64(value, GITS_BASER, VALID)) {
continue;
}
td->page_sz = page_sz;
L1TABLE_ENTRY_SIZE) *
(page_sz / td->entry_sz));
}
- td->num_ids = 1ULL << idbits;
+ td->num_entries = MIN(td->num_entries, 1ULL << idbits);
}
}
num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;
memset(&s->cq, 0 , sizeof(s->cq));
- s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
- if (s->cq.valid) {
+ if (FIELD_EX64(value, GITS_CBASER, VALID)) {
s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
GITS_CMDQ_ENTRY_SIZE;
s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
}
}
+static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
+ uint64_t *data, unsigned size,
+ MemTxAttrs attrs)
+{
+ /*
+ * GITS_TRANSLATER is write-only, and all other addresses
+ * in the interrupt translation space frame are RES0.
+ * Reads therefore return 0 and never fault (read-as-zero).
+ */
+ *data = 0;
+ return MEMTX_OK;
+}
+
static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
uint64_t data, unsigned size,
MemTxAttrs attrs)
{
GICv3ITSState *s = (GICv3ITSState *)opaque;
bool result = true;
- uint32_t devid = 0;
+
+ trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);
switch (offset) {
case GITS_TRANSLATER:
if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
- devid = attrs.requester_id;
- result = process_its_cmd(s, data, devid, NONE);
+ result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
}
break;
default:
s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
extract_table_params(s);
extract_cmdq_params(s);
- s->creadr = 0;
process_cmdq(s);
} else {
s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = deposit64(s->cbaser, 0, 32, value);
s->creadr = 0;
- s->cwriter = s->creadr;
}
break;
case GITS_CBASER + 4:
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = deposit64(s->cbaser, 32, 32, value);
s->creadr = 0;
- s->cwriter = s->creadr;
}
break;
case GITS_CWRITER:
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
index = (offset - GITS_BASER) / 8;
+ if (s->baser[index] == 0) {
+ /* Unimplemented GITS_BASERn: RAZ/WI */
+ break;
+ }
if (offset & 7) {
value <<= 32;
value &= ~GITS_BASER_RO_MASK;
break;
case GITS_IDREGS ... GITS_IDREGS + 0x2f:
/* ID registers */
- *data = gicv3_idreg(offset - GITS_IDREGS);
+ *data = gicv3_idreg(offset - GITS_IDREGS, GICV3_PIDR0_ITS);
break;
case GITS_TYPER:
*data = extract64(s->typer, 0, 32);
*/
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
index = (offset - GITS_BASER) / 8;
+ if (s->baser[index] == 0) {
+ /* Unimplemented GITS_BASERn: RAZ/WI */
+ break;
+ }
s->baser[index] &= GITS_BASER_RO_MASK;
s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
}
if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = value;
s->creadr = 0;
- s->cwriter = s->creadr;
}
break;
case GITS_CWRITER:
if (!result) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest read at offset " TARGET_FMT_plx
- "size %u\n", __func__, offset, size);
+ " size %u\n", __func__, offset, size);
+ trace_gicv3_its_badread(offset, size);
/*
* The spec requires that reserved registers are RAZ/WI;
* so use false returns from leaf functions as a way to
* the caller, or we'll cause a spurious guest data abort.
*/
*data = 0;
+ } else {
+ trace_gicv3_its_read(offset, *data, size);
}
return MEMTX_OK;
}
if (!result) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid guest write at offset " TARGET_FMT_plx
- "size %u\n", __func__, offset, size);
+ " size %u\n", __func__, offset, size);
+ trace_gicv3_its_badwrite(offset, data, size);
/*
* The spec requires that reserved registers are RAZ/WI;
* so use false returns from leaf functions as a way to
* trigger the guest-error logging but don't return it to
* the caller, or we'll cause a spurious guest data abort.
*/
+ } else {
+ trace_gicv3_its_write(offset, data, size);
}
return MEMTX_OK;
}
};
static const MemoryRegionOps gicv3_its_translation_ops = {
+ .read_with_attrs = gicv3_its_translation_read,
.write_with_attrs = gicv3_its_translation_write,
.valid.min_access_size = 2,
.valid.max_access_size = 4,
gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);
- address_space_init(&s->gicv3->dma_as, s->gicv3->dma,
- "gicv3-its-sysmem");
-
/* set the ITS default features supported */
s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
/*
* setting GITS_BASER0.Type = 0b001 (Device)
* GITS_BASER1.Type = 0b100 (Collection Table)
+ * GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later
* GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented)
* GITS_BASER<0,1>.Page_Size = 64KB
* and default translation table entry size to 16 bytes
GITS_BASER_PAGESIZE_64K);
s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
GITS_CTE_SIZE - 1);
+
+ if (its_feature_virtual(s)) {
+ s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
+ GITS_BASER_TYPE_VPE);
+ s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
+ GITS_BASER_PAGESIZE_64K);
+ s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
+ GITS_VPE_SIZE - 1);
+ }
}
static void gicv3_its_post_load(GICv3ITSState *s)