*/
#include "qemu/osdep.h"
-#include "hw/boards.h"
-#include "sysemu/sysemu.h"
+#include "hw/irq.h"
#include "hw/sysbus.h"
+#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
+#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
return r;
}
- if (smmuv3_q_empty(q)) {
+ if (!smmuv3_q_empty(q)) {
smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
}
return MEMTX_OK;
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
- Evt evt;
+ Evt evt = {};
MemTxResult r;
if (!smmuv3_eventq_enabled(s)) {
EVT_SET_SID(&evt, info->sid);
switch (info->type) {
- case SMMU_EVT_OK:
+ case SMMU_EVT_NONE:
return;
case SMMU_EVT_F_UUT:
EVT_SET_SSID(&evt, info->u.f_uut.ssid);
s->sid_split = 0;
}
+static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
+ SMMUEventInfo *event)
+{
+ int ret;
+
+ trace_smmuv3_get_ste(addr);
+ /* TODO: guarantee 64-bit single-copy atomicity */
+ ret = dma_memory_read(&address_space_memory, addr,
+ (void *)buf, sizeof(*buf));
+ if (ret != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
+ event->type = SMMU_EVT_F_STE_FETCH;
+ event->u.f_ste_fetch.addr = addr;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* @ssid > 0 not supported yet */
+static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
+ CD *buf, SMMUEventInfo *event)
+{
+ dma_addr_t addr = STE_CTXPTR(ste);
+ int ret;
+
+ trace_smmuv3_get_cd(addr);
+ /* TODO: guarantee 64-bit single-copy atomicity */
+ ret = dma_memory_read(&address_space_memory, addr,
+ (void *)buf, sizeof(*buf));
+ if (ret != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
+ event->type = SMMU_EVT_F_CD_FETCH;
+ event->u.f_cd_fetch.addr = addr;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Returns < 0 in case of invalid STE, 0 otherwise */
+static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
+ STE *ste, SMMUEventInfo *event)
+{
+ uint32_t config;
+
+ if (!STE_VALID(ste)) {
+ if (!event->inval_ste_allowed) {
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
+ }
+ goto bad_ste;
+ }
+
+ config = STE_CONFIG(ste);
+
+ if (STE_CFG_ABORT(config)) {
+ cfg->aborted = true;
+ return 0;
+ }
+
+ if (STE_CFG_BYPASS(config)) {
+ cfg->bypassed = true;
+ return 0;
+ }
+
+ if (STE_CFG_S2_ENABLED(config)) {
+ qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
+ goto bad_ste;
+ }
+
+ if (STE_S1CDMAX(ste) != 0) {
+ qemu_log_mask(LOG_UNIMP,
+ "SMMUv3 does not support multiple context descriptors yet\n");
+ goto bad_ste;
+ }
+
+ if (STE_S1STALLD(ste)) {
+ qemu_log_mask(LOG_UNIMP,
+ "SMMUv3 S1 stalling fault model not allowed yet\n");
+ goto bad_ste;
+ }
+ return 0;
+
+bad_ste:
+ event->type = SMMU_EVT_C_BAD_STE;
+ return -EINVAL;
+}
+
+/**
+ * smmu_find_ste - Return the stream table entry associated
+ * with the given sid
+ *
+ * @s: smmuv3 handle
+ * @sid: stream ID
+ * @ste: returned stream table entry
+ * @event: handle to an event info
+ *
+ * Supports linear and 2-level stream tables.
+ * Return 0 on success, -EINVAL otherwise.
+ */
+static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
+ SMMUEventInfo *event)
+{
+ dma_addr_t addr;
+ int ret;
+
+ trace_smmuv3_find_ste(sid, s->features, s->sid_split);
+ /* Check SID range */
+ if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
+ event->type = SMMU_EVT_C_BAD_STREAMID;
+ return -EINVAL;
+ }
+ if (s->features & SMMU_FEATURE_2LVL_STE) {
+ int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
+ dma_addr_t strtab_base, l1ptr, l2ptr;
+ STEDesc l1std;
+
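+ /*
+ * 2-level walk: the upper SID bits index the L1 descriptor table
+ * and the low sid_split bits index into the L2 table it points to.
+ */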
+ strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
+ l1_ste_offset = sid >> s->sid_split;
+ l2_ste_offset = sid & ((1 << s->sid_split) - 1);
+ l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
+ /* TODO: guarantee 64-bit single-copy atomicity */
+ ret = dma_memory_read(&address_space_memory, l1ptr,
+ (uint8_t *)&l1std, sizeof(l1std));
+ if (ret != MEMTX_OK) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
+ event->type = SMMU_EVT_F_STE_FETCH;
+ event->u.f_ste_fetch.addr = l1ptr;
+ return -EINVAL;
+ }
+
+ span = L1STD_SPAN(&l1std);
+
+ if (!span) {
+ /* l2ptr is not valid */
+ if (!event->inval_ste_allowed) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "invalid sid=%d (L1STD span=0)\n", sid);
+ }
+ event->type = SMMU_EVT_C_BAD_STREAMID;
+ return -EINVAL;
+ }
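+ /*
+ * The descriptor's span field bounds the number of valid entries
+ * in the L2 table; offsets beyond max_l2_ste must be rejected.
+ */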
+ max_l2_ste = (1 << span) - 1;
+ l2ptr = l1std_l2ptr(&l1std);
+ trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
+ l2ptr, l2_ste_offset, max_l2_ste);
+ if (l2_ste_offset > max_l2_ste) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "l2_ste_offset=%d > max_l2_ste=%d\n",
+ l2_ste_offset, max_l2_ste);
+ event->type = SMMU_EVT_C_BAD_STE;
+ return -EINVAL;
+ }
+ addr = l2ptr + l2_ste_offset * sizeof(*ste);
+ } else {
+ addr = (s->strtab_base & SMMU_BASE_ADDR_MASK) + sid * sizeof(*ste);
+ }
+
+ if (smmu_get_ste(s, addr, ste, event)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
+{
+ int ret = -EINVAL;
+ int i;
+
+ if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
+ goto bad_cd;
+ }
+ if (!CD_A(cd)) {
+ goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
+ }
+ if (CD_S(cd)) {
+ goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
+ }
+ if (CD_HA(cd) || CD_HD(cd)) {
+ goto bad_cd; /* HTTU = 0 */
+ }
+
+ /* only AArch64, stage-1 translation is supported at the moment */
+ cfg->aa64 = true;
+ cfg->stage = 1;
+
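+ /* clamp the CD's IPS field to the SMMU's own output address size */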
+ cfg->oas = oas2bits(CD_IPS(cd));
+ cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
+ cfg->tbi = CD_TBI(cd);
+ cfg->asid = CD_ASID(cd);
+
+ trace_smmuv3_decode_cd(cfg->oas);
+
+ /* decode data dependent on TT */
+ for (i = 0; i <= 1; i++) {
+ int tg, tsz;
+ SMMUTransTableInfo *tt = &cfg->tt[i];
+
+ cfg->tt[i].disabled = CD_EPD(cd, i);
+ if (cfg->tt[i].disabled) {
+ continue;
+ }
+
+ tsz = CD_TSZ(cd, i);
+ if (tsz < 16 || tsz > 39) {
+ goto bad_cd;
+ }
+
+ tg = CD_TG(cd, i);
+ tt->granule_sz = tg2granule(tg, i);
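+ /* only 4K and 64K granules and little-endian tables are accepted */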
+ if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
+ goto bad_cd;
+ }
+
+ tt->tsz = tsz;
+ tt->ttb = CD_TTB(cd, i);
+ if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
+ goto bad_cd;
+ }
+ trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
+ }
+
+ event->record_trans_faults = CD_R(cd);
+
+ return 0;
+
+bad_cd:
+ event->type = SMMU_EVT_C_BAD_CD;
+ return ret;
+}
+
+/**
+ * smmuv3_decode_config - Prepare the translation configuration
+ * for the @mr iommu region
+ * @mr: iommu memory region the translation config must be prepared for
+ * @cfg: output translation configuration which is populated through
+ * the different configuration decoding steps
+ * @event: must be zeroed by the caller
+ *
+ * Return < 0 in case of config decoding error (@event is filled
+ * accordingly), 0 otherwise.
+ */
+static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
+ SMMUEventInfo *event)
+{
+ SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
+ uint32_t sid = smmu_get_sid(sdev);
+ SMMUv3State *s = sdev->smmu;
+ int ret;
+ STE ste;
+ CD cd;
+
+ ret = smmu_find_ste(s, sid, &ste, event);
+ if (ret) {
+ return ret;
+ }
+
+ ret = decode_ste(s, cfg, &ste, event);
+ if (ret) {
+ return ret;
+ }
+
+ if (cfg->aborted || cfg->bypassed) {
+ return 0;
+ }
+
+ ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
+ if (ret) {
+ return ret;
+ }
+
+ return decode_cd(cfg, &cd, event);
+}
+
+/**
+ * smmuv3_get_config - Look up a cached copy of the configuration data for
+ * @sdev; on a cache miss, decode the configuration structures from
+ * guest RAM.
+ *
+ * @sdev: SMMUDevice handle
+ * @event: output event info
+ *
+ * The configuration cache contains data resulting from both STE and CD
+ * decoding, in the form of an SMMUTransCfg struct. The hash table is
+ * indexed by the SMMUDevice handle.
+ */
+static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
+{
+ SMMUv3State *s = sdev->smmu;
+ SMMUState *bc = &s->smmu_state;
+ SMMUTransCfg *cfg;
+
+ cfg = g_hash_table_lookup(bc->configs, sdev);
+ if (cfg) {
+ sdev->cfg_cache_hits++;
+ trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
+ sdev->cfg_cache_hits, sdev->cfg_cache_misses,
+ 100 * sdev->cfg_cache_hits /
+ (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
+ } else {
+ sdev->cfg_cache_misses++;
+ trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
+ sdev->cfg_cache_hits, sdev->cfg_cache_misses,
+ 100 * sdev->cfg_cache_hits /
+ (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
+ cfg = g_new0(SMMUTransCfg, 1);
+
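+ /* only a successfully decoded config is inserted in the cache */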
+ if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
+ g_hash_table_insert(bc->configs, sdev, cfg);
+ } else {
+ g_free(cfg);
+ cfg = NULL;
+ }
+ }
+ return cfg;
+}
+
+static void smmuv3_flush_config(SMMUDevice *sdev)
+{
+ SMMUv3State *s = sdev->smmu;
+ SMMUState *bc = &s->smmu_state;
+
+ trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
+ g_hash_table_remove(bc->configs, sdev);
+}
+
+static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
+ IOMMUAccessFlags flag, int iommu_idx)
+{
+ SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
+ SMMUv3State *s = sdev->smmu;
+ uint32_t sid = smmu_get_sid(sdev);
+ SMMUEventInfo event = {.type = SMMU_EVT_NONE,
+ .sid = sid,
+ .inval_ste_allowed = false};
+ SMMUPTWEventInfo ptw_info = {};
+ SMMUTranslationStatus status;
+ SMMUState *bs = ARM_SMMU(s);
+ uint64_t page_mask, aligned_addr;
+ IOMMUTLBEntry *cached_entry = NULL;
+ SMMUTransTableInfo *tt;
+ SMMUTransCfg *cfg = NULL;
+ IOMMUTLBEntry entry = {
+ .target_as = &address_space_memory,
+ .iova = addr,
+ .translated_addr = addr,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE,
+ };
+ SMMUIOTLBKey key, *new_key;
+
+ qemu_mutex_lock(&s->mutex);
+
+ if (!smmu_enabled(s)) {
+ status = SMMU_TRANS_DISABLE;
+ goto epilogue;
+ }
+
+ cfg = smmuv3_get_config(sdev, &event);
+ if (!cfg) {
+ status = SMMU_TRANS_ERROR;
+ goto epilogue;
+ }
+
+ if (cfg->aborted) {
+ status = SMMU_TRANS_ABORT;
+ goto epilogue;
+ }
+
+ if (cfg->bypassed) {
+ status = SMMU_TRANS_BYPASS;
+ goto epilogue;
+ }
+
+ tt = select_tt(cfg, addr);
+ if (!tt) {
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_TRANSLATION;
+ event.u.f_translation.addr = addr;
+ event.u.f_translation.rnw = flag & 0x1;
+ }
+ status = SMMU_TRANS_ERROR;
+ goto epilogue;
+ }
+
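+ /* work on the granule-aligned address; the page offset is re-added
+ * in the epilogue */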
+ page_mask = (1ULL << (tt->granule_sz)) - 1;
+ aligned_addr = addr & ~page_mask;
+
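+ /* the IOTLB is keyed by (asid, granule-aligned iova) */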
+ key.asid = cfg->asid;
+ key.iova = aligned_addr;
+
+ cached_entry = g_hash_table_lookup(bs->iotlb, &key);
+ if (cached_entry) {
+ cfg->iotlb_hits++;
+ trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
+ cfg->iotlb_hits, cfg->iotlb_misses,
+ 100 * cfg->iotlb_hits /
+ (cfg->iotlb_hits + cfg->iotlb_misses));
+ if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
+ status = SMMU_TRANS_ERROR;
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_PERMISSION;
+ event.u.f_permission.addr = addr;
+ event.u.f_permission.rnw = flag & 0x1;
+ }
+ } else {
+ status = SMMU_TRANS_SUCCESS;
+ }
+ goto epilogue;
+ }
+
+ cfg->iotlb_misses++;
+ trace_smmu_iotlb_cache_miss(cfg->asid, aligned_addr,
+ cfg->iotlb_hits, cfg->iotlb_misses,
+ 100 * cfg->iotlb_hits /
+ (cfg->iotlb_hits + cfg->iotlb_misses));
+
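+ /* simple replacement policy: flush the whole IOTLB once it is full */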
+ if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
+ smmu_iotlb_inv_all(bs);
+ }
+
+ cached_entry = g_new0(IOMMUTLBEntry, 1);
+
+ if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
+ g_free(cached_entry);
+ switch (ptw_info.type) {
+ case SMMU_PTW_ERR_WALK_EABT:
+ event.type = SMMU_EVT_F_WALK_EABT;
+ event.u.f_walk_eabt.addr = addr;
+ event.u.f_walk_eabt.rnw = flag & 0x1;
+ event.u.f_walk_eabt.class = 0x1;
+ event.u.f_walk_eabt.addr2 = ptw_info.addr;
+ break;
+ case SMMU_PTW_ERR_TRANSLATION:
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_TRANSLATION;
+ event.u.f_translation.addr = addr;
+ event.u.f_translation.rnw = flag & 0x1;
+ }
+ break;
+ case SMMU_PTW_ERR_ADDR_SIZE:
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_ADDR_SIZE;
+ event.u.f_addr_size.addr = addr;
+ event.u.f_addr_size.rnw = flag & 0x1;
+ }
+ break;
+ case SMMU_PTW_ERR_ACCESS:
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_ACCESS;
+ event.u.f_access.addr = addr;
+ event.u.f_access.rnw = flag & 0x1;
+ }
+ break;
+ case SMMU_PTW_ERR_PERMISSION:
+ if (event.record_trans_faults) {
+ event.type = SMMU_EVT_F_PERMISSION;
+ event.u.f_permission.addr = addr;
+ event.u.f_permission.rnw = flag & 0x1;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ status = SMMU_TRANS_ERROR;
+ } else {
+ new_key = g_new0(SMMUIOTLBKey, 1);
+ new_key->asid = cfg->asid;
+ new_key->iova = aligned_addr;
+ g_hash_table_insert(bs->iotlb, new_key, cached_entry);
+ status = SMMU_TRANS_SUCCESS;
+ }
+
+epilogue:
+ qemu_mutex_unlock(&s->mutex);
+ switch (status) {
+ case SMMU_TRANS_SUCCESS:
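+ /* re-add the intra-page offset to the cached page translation */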
+ entry.perm = flag;
+ entry.translated_addr = cached_entry->translated_addr +
+ (addr & page_mask);
+ entry.addr_mask = cached_entry->addr_mask;
+ trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
+ entry.translated_addr, entry.perm);
+ break;
+ case SMMU_TRANS_DISABLE:
+ entry.perm = flag;
+ entry.addr_mask = ~TARGET_PAGE_MASK;
+ trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
+ entry.perm);
+ break;
+ case SMMU_TRANS_BYPASS:
+ entry.perm = flag;
+ entry.addr_mask = ~TARGET_PAGE_MASK;
+ trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
+ entry.perm);
+ break;
+ case SMMU_TRANS_ABORT:
+ /* no event is recorded on abort */
+ trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
+ entry.perm);
+ break;
+ case SMMU_TRANS_ERROR:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s translation failed for iova=0x%"PRIx64"(%s)\n",
+ mr->parent_obj.name, addr, smmu_event_string(event.type));
+ smmuv3_record_event(s, &event);
+ break;
+ }
+
+ return entry;
+}
+
+/**
+ * smmuv3_notify_iova - call the notifier @n for a given
+ * @asid and @iova tuple.
+ *
+ * @mr: IOMMU mr region handle
+ * @n: notifier to be called
+ * @asid: address space ID or negative value if we don't care
+ * @iova: iova
+ */
+static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
+ IOMMUNotifier *n,
+ int asid,
+ dma_addr_t iova)
+{
+ SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
+ SMMUEventInfo event = {.inval_ste_allowed = true};
+ SMMUTransTableInfo *tt;
+ SMMUTransCfg *cfg;
+ IOMMUTLBEntry entry;
+
+ cfg = smmuv3_get_config(sdev, &event);
+ if (!cfg) {
+ return;
+ }
+
+ if (asid >= 0 && cfg->asid != asid) {
+ return;
+ }
+
+ tt = select_tt(cfg, iova);
+ if (!tt) {
+ return;
+ }
+
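+ /* unmap notification: IOMMU_NONE perm over a single granule */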
+ entry.target_as = &address_space_memory;
+ entry.iova = iova;
+ entry.addr_mask = (1 << tt->granule_sz) - 1;
+ entry.perm = IOMMU_NONE;
+
+ memory_region_notify_one(n, &entry);
+}
+
+/* invalidate an asid/iova tuple in all IOMMU memory regions */
+static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
+{
+ SMMUDevice *sdev;
+
+ QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
+ IOMMUMemoryRegion *mr = &sdev->iommu;
+ IOMMUNotifier *n;
+
+ trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);
+
+ IOMMU_NOTIFIER_FOREACH(n, mr) {
+ smmuv3_notify_iova(mr, n, asid, iova);
+ }
+ }
+}
+
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
+ SMMUState *bs = ARM_SMMU(s);
SMMUCmdError cmd_error = SMMU_CERROR_NONE;
SMMUQueue *q = &s->cmdq;
SMMUCommandType type = 0;
trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
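+ /* serialize command handling against concurrent translations */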
+ qemu_mutex_lock(&s->mutex);
switch (type) {
case SMMU_CMD_SYNC:
if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
break;
case SMMU_CMD_PREFETCH_CONFIG:
case SMMU_CMD_PREFETCH_ADDR:
+ break;
case SMMU_CMD_CFGI_STE:
+ {
+ uint32_t sid = CMD_SID(&cmd);
+ IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
+ SMMUDevice *sdev;
+
+ if (CMD_SSEC(&cmd)) {
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
+
+ if (!mr) {
+ break;
+ }
+
+ trace_smmuv3_cmdq_cfgi_ste(sid);
+ sdev = container_of(mr, SMMUDevice, iommu);
+ smmuv3_flush_config(sdev);
+
+ break;
+ }
case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
+ {
+ uint32_t start = CMD_SID(&cmd), end, i;
+ uint8_t range = CMD_STE_RANGE(&cmd);
+
+ if (CMD_SSEC(&cmd)) {
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
+
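+ /* a range value of n invalidates 2^(n + 1) consecutive StreamIDs */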
+ end = start + (1 << (range + 1)) - 1;
+ trace_smmuv3_cmdq_cfgi_ste_range(start, end);
+
+ for (i = start; i <= end; i++) {
+ IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
+ SMMUDevice *sdev;
+
+ if (!mr) {
+ continue;
+ }
+ sdev = container_of(mr, SMMUDevice, iommu);
+ smmuv3_flush_config(sdev);
+ }
+ break;
+ }
case SMMU_CMD_CFGI_CD:
case SMMU_CMD_CFGI_CD_ALL:
- case SMMU_CMD_TLBI_NH_ALL:
+ {
+ uint32_t sid = CMD_SID(&cmd);
+ IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
+ SMMUDevice *sdev;
+
+ if (CMD_SSEC(&cmd)) {
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
+
+ if (!mr) {
+ break;
+ }
+
+ trace_smmuv3_cmdq_cfgi_cd(sid);
+ sdev = container_of(mr, SMMUDevice, iommu);
+ smmuv3_flush_config(sdev);
+ break;
+ }
case SMMU_CMD_TLBI_NH_ASID:
- case SMMU_CMD_TLBI_NH_VA:
+ {
+ uint16_t asid = CMD_ASID(&cmd);
+
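+ /* the notifier path has no per-ASID granularity: flush it entirely */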
+ trace_smmuv3_cmdq_tlbi_nh_asid(asid);
+ smmu_inv_notifiers_all(&s->smmu_state);
+ smmu_iotlb_inv_asid(bs, asid);
+ break;
+ }
+ case SMMU_CMD_TLBI_NH_ALL:
+ case SMMU_CMD_TLBI_NSNH_ALL:
+ trace_smmuv3_cmdq_tlbi_nh();
+ smmu_inv_notifiers_all(&s->smmu_state);
+ smmu_iotlb_inv_all(bs);
+ break;
case SMMU_CMD_TLBI_NH_VAA:
+ {
+ dma_addr_t addr = CMD_ADDR(&cmd);
+ uint16_t vmid = CMD_VMID(&cmd);
+
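+ /* VAA is an address-only invalidation: asid == -1 matches any ASID */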
+ trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
+ smmuv3_inv_notifiers_iova(bs, -1, addr);
+ smmu_iotlb_inv_all(bs);
+ break;
+ }
+ case SMMU_CMD_TLBI_NH_VA:
+ {
+ uint16_t asid = CMD_ASID(&cmd);
+ uint16_t vmid = CMD_VMID(&cmd);
+ dma_addr_t addr = CMD_ADDR(&cmd);
+ bool leaf = CMD_LEAF(&cmd);
+
+ trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
+ smmuv3_inv_notifiers_iova(bs, asid, addr);
+ smmu_iotlb_inv_iova(bs, asid, addr);
+ break;
+ }
case SMMU_CMD_TLBI_EL3_ALL:
case SMMU_CMD_TLBI_EL3_VA:
case SMMU_CMD_TLBI_EL2_ALL:
case SMMU_CMD_TLBI_EL2_VAA:
case SMMU_CMD_TLBI_S12_VMALL:
case SMMU_CMD_TLBI_S2_IPA:
- case SMMU_CMD_TLBI_NSNH_ALL:
case SMMU_CMD_ATC_INV:
case SMMU_CMD_PRI_RESP:
case SMMU_CMD_RESUME:
"Illegal command type: %d\n", CMD_TYPE(&cmd));
break;
}
+ qemu_mutex_unlock(&s->mutex);
if (cmd_error) {
break;
}
uint64_t *data, MemTxAttrs attrs)
{
switch (offset) {
- case A_IDREGS ... A_IDREGS + 0x1f:
+ case A_IDREGS ... A_IDREGS + 0x2f:
*data = smmuv3_idreg(offset - A_IDREGS);
return MEMTX_OK;
case A_IDR0 ... A_IDR5:
return;
}
+ qemu_mutex_init(&s->mutex);
+
memory_region_init_io(&sys->iomem, OBJECT(s),
&smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
VMSTATE_UINT32(prod, SMMUQueue),
VMSTATE_UINT32(cons, SMMUQueue),
VMSTATE_UINT8(log2size, SMMUQueue),
+ VMSTATE_END_OF_LIST(),
},
};
dc->realize = smmu_realize;
}
+static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
+ IOMMUNotifierFlag old,
+ IOMMUNotifierFlag new,
+ Error **errp)
+{
+ SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
+ SMMUv3State *s3 = sdev->smmu;
+ SMMUState *s = &(s3->smmu_state);
+
+ if (new & IOMMU_NOTIFIER_MAP) {
+ error_setg(errp,
+ "device %02x.%02x.%x requires iommu MAP notifier which is "
+ "not currently supported", pci_bus_num(sdev->bus),
+ PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
+ return -EINVAL;
+ }
+
+ if (old == IOMMU_NOTIFIER_NONE) {
+ trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
+ QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
+ } else if (new == IOMMU_NOTIFIER_NONE) {
+ trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
+ QLIST_REMOVE(sdev, next);
+ }
+ return 0;
+}
+
static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
void *data)
{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = smmuv3_translate;
+ imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}
static const TypeInfo smmuv3_type_info = {