/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}
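/*
 * Note on the GERROR/GERRORN protocol: an error condition is "active" as
 * long as the corresponding bits of GERROR and GERRORN differ. The device
 * reports a new error by toggling the GERROR bit (smmuv3_trigger_irq above);
 * the guest acknowledges it by writing GERRORN with that bit set equal to
 * GERROR again, which smmuv3_write_gerrorn below checks against the pending
 * set.
 */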
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case the guest toggles bits corresponding
     * to IRQs that are not active (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}
static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}
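/*
 * The PROD/CONS registers hold a queue index in their low LOG2SIZE bits
 * plus a wrap bit just above it; the Q_* helpers used here come from
 * smmuv3-internal.h and derive the entry address and the full/empty state
 * from those fields. Roughly: the queue is empty when index and wrap bits
 * both match, and full when the indexes match but the wrap bits differ.
 */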
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}
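/*
 * If the write to the event queue fails (queue disabled, full, or the DMA
 * access aborts), smmuv3_write_eventq returns != MEMTX_OK and the failure
 * is surfaced to the guest as an EVENTQ_ABT_ERR global error rather than
 * as a queue entry.
 */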
static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     *       multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}
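/*
 * Both queue base registers embed the queue size: bits [4:0] hold LOG2SIZE.
 * That is why the deposit64(.., 0, 5, ..) calls above seed the reset value,
 * and why the register write handlers further down clamp guest-programmed
 * sizes to SMMU_CMDQS/SMMU_EVENTQS.
 */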
static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}
/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}
/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}
/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range */
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = (s->strtab_base & SMMU_BASE_ADDR_MASK) + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
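/*
 * 2-level walk example, assuming sid_split = 8: for sid = 0x1234 the L1
 * index is sid >> 8 = 0x12 and the L2 index is sid & 0xff = 0x34. The L1
 * descriptor yields the L2 table pointer plus a SPAN field; SPAN bounds
 * the number of STEs in that L2 table, hence the max_l2_ste check above.
 */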
/* Returns < 0 in case of invalid CD, 0 otherwise */
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}
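/*
 * The TSZ check above follows from the input address size: a TTBRx region
 * covers 64 - TSZ bits of VA, so the accepted range 16..39 corresponds to
 * input address sizes between 25 and 48 bits.
 */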
/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}
/**
 * smmuv3_get_config - Look up a cached copy of configuration data for
 * @sdev and on cache miss performs a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg = NULL;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}
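/*
 * Cached configs stay valid until the guest issues a CFGI_* invalidation
 * command; smmuv3_flush_config below is the per-device eviction used by
 * those command handlers.
 */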
static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    IOMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    SMMUIOTLBKey key, *new_key;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (event.record_trans_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    key.asid = cfg->asid;
    key.iova = aligned_addr;

    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
    if (cached_entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
                                   cfg->iotlb_hits, cfg->iotlb_misses,
                                   100 * cfg->iotlb_hits /
                                   (cfg->iotlb_hits + cfg->iotlb_misses));
        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cfg->iotlb_misses++;
    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
                                cfg->iotlb_hits, cfg->iotlb_misses,
                                100 * cfg->iotlb_hits /
                                (cfg->iotlb_hits + cfg->iotlb_misses));

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    cached_entry = g_new0(IOMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        new_key = g_new0(SMMUIOTLBKey, 1);
        new_key->asid = cfg->asid;
        new_key->iova = aligned_addr;
        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        entry.translated_addr = cached_entry->translated_addr +
                                    (addr & page_mask);
        entry.addr_mask = cached_entry->addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64"(%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}
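/*
 * The IOTLB is keyed by (asid, page-aligned iova) and shared across devices
 * behind this SMMU; cached entries carry the permissions established by the
 * page table walk, which is why a write hitting a read-only cached entry is
 * re-checked above and reported as an F_PERMISSION event.
 */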
/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid,
                               dma_addr_t iova)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUEventInfo event = {.inval_ste_allowed = true};
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg;
    IOMMUTLBEntry entry;

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        return;
    }

    if (asid >= 0 && cfg->asid != asid) {
        return;
    }

    tt = select_tt(cfg, iova);
    if (!tt) {
        return;
    }

    entry.target_as = &address_space_memory;
    entry.iova = iova;
    entry.addr_mask = (1 << tt->granule_sz) - 1;
    entry.perm = IOMMU_NONE;

    memory_region_notify_one(n, &entry);
}
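/*
 * The notification built above is an invalidation (perm == IOMMU_NONE,
 * i.e. an UNMAP notifier event): only UNMAP notifiers are accepted at
 * registration time, see smmuv3_notify_flag_changed below.
 */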
/* invalidate an asid/iova tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova);
        }
    }
}
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t start = CMD_SID(&cmd), end, i;
            uint8_t range = CMD_STE_RANGE(&cmd);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            end = start + (1 << (range + 1)) - 1;
            trace_smmuv3_cmdq_cfgi_ste_range(start, end);

            for (i = start; i <= end; i++) {
                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
                SMMUDevice *sdev;

                if (!mr) {
                    continue;
                }
                sdev = container_of(mr, SMMUDevice, iommu);
                smmuv3_flush_config(sdev);
            }
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        {
            dma_addr_t addr = CMD_ADDR(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
            smmuv3_inv_notifiers_iova(bs, -1, addr);
            smmu_iotlb_inv_all(bs);
            break;
        }
        case SMMU_CMD_TLBI_NH_VA:
        {
            uint16_t asid = CMD_ASID(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);
            dma_addr_t addr = CMD_ADDR(&cmd);
            bool leaf = CMD_LEAF(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
            smmuv3_inv_notifiers_iova(bs, asid, addr);
            smmu_iotlb_inv_iova(bs, asid, addr);
            break;
        }
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
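/*
 * On a command error the cons index is left pointing at the faulty command
 * and CMDQ_ERR is raised in GERROR; consumption stays gated (see the
 * FIELD_EX32 check at the top of the loop) until the guest acknowledges
 * the error through GERRORN, at which point smmu_writel below restarts
 * smmuv3_cmdq_consume.
 */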
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify us that commands
         * can be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
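/*
 * The 64-bit registers (GERROR_IRQ_CFG0, STRTAB_BASE, the queue bases, ...)
 * are accessible either as a single 64-bit access (smmu_writell/smmu_readll)
 * or as two 32-bit halves, hence the deposit64/extract64 pairs above for
 * offset and offset + 4.
 */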
static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}
static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}
static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}
static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}
static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};
static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};
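/*
 * Note that neither the config cache nor the IOTLB is migrated: both are
 * rebuilt on demand on the destination from the guest's in-memory
 * structures on the first translation.
 */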
static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}
static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}
static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}
static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}
static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};
static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)