/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }

        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}
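
/*
 * GERROR/GERRORN handshake: the SMMU reports a new global error by
 * toggling the corresponding GERROR bit; the guest acknowledges it by
 * toggling the same bit in GERRORN. A bit is pending while the two
 * registers disagree, hence the XORs below.
 */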

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}
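
/*
 * Circular queue helpers. Per the SMMUv3 spec, PROD and CONS each hold an
 * index with a wrap bit just above it: equal indexes with equal wrap bits
 * mean an empty queue, equal indexes with opposite wrap bits a full one.
 * Q_CONS_ENTRY()/Q_PROD_ENTRY() map the indexes to guest addresses.
 */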

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    /*
     * queue_write() has just incremented PROD, so the queue cannot be
     * empty here: testing for emptiness would never raise the interrupt.
     */
    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}
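
/*
 * smmuv3_record_event - Translate @info into an event record and push it
 * onto the event queue; if the queue write aborts, report EVENTQ_ABT_ERR
 * through the GERROR interrupt instead.
 */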

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_OK:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}
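
/*
 * Reset values advertise a stage-1-only, AArch64-only SMMU with little
 * endian translation tables, no stall model and 2-level stream tables,
 * matching what the decode paths in this file actually implement.
 */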

static void smmuv3_init_regs(SMMUv3State *s)
{
    /*
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}
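
/* Fetch the STE (stream table entry) located at @addr in guest memory */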

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 if the caller has no need to continue the translation */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true; /* abort but don't record any event */
        return -EINVAL;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return -EINVAL;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range: with SIDSIZE bits, valid SIDs are 0..2^SIDSIZE - 1 */
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "invalid sid=%d (L1STD span=0)\n", sid);
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = s->strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
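
/*
 * Decode a stage-1 context descriptor. The two translation tables are
 * decoded independently: per the Arm VMSA, TT0 translates the bottom of
 * the VA space and TT1 the top, and either can be disabled via EPDx.
 */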

static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return -EINVAL;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 if the translation needs to be aborted (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret = -EINVAL;
    STE ste;
    CD cd;

    if (smmu_find_ste(s, sid, &ste, event)) {
        return ret;
    }

    if (decode_ste(s, cfg, &ste, event)) {
        return ret;
    }

    if (smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event)) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_OK, .sid = sid};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTransCfg cfg = {};
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    int ret = 0;

    if (!smmu_enabled(s)) {
        /* SMMU disabled: accesses pass through untranslated */
        goto out;
    }

    ret = smmuv3_decode_config(mr, &cfg, &event);
    if (ret) {
        /*
         * Abort and bypass STEs stop the decoding early but are not
         * errors: clear ret so no event gets recorded for them. The
         * final perm computation below distinguishes the two cases.
         */
        if (cfg.aborted || cfg.bypassed) {
            ret = 0;
        }
        goto out;
    }

    ret = smmu_ptw(&cfg, addr, flag, &entry, &ptw_info);
    if (ret) {
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
out:
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64"(%d)\n",
                      mr->parent_obj.name, addr, ret);
        entry.perm = IOMMU_NONE;
        smmuv3_record_event(s, &event);
    } else if (!cfg.aborted) {
        entry.perm = flag;
        trace_smmuv3_translate(mr->parent_obj.name, sid, addr,
                               entry.translated_addr, entry.perm);
    }

    return entry;
}
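
/*
 * smmuv3_cmdq_consume - Drain the command queue. Since no TLB is modeled,
 * invalidation commands only need to be traced and consumed; an unknown
 * opcode stops consumption with CERROR_ILL, an aborted fetch with
 * CERROR_ABT.
 */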

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
        case SMMU_CMD_CFGI_STE:
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NH_ASID:
        case SMMU_CMD_TLBI_NH_VA:
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_TLBI_NSNH_ALL:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
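
/*
 * MMIO accessors: registers that are architecturally 64 bits wide can be
 * accessed either with a single 64-bit access (the *ll handlers below) or
 * as two 32-bit halves through the *l handlers.
 */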

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
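
/*
 * The region is mapped over 0x20000 bytes in smmu_realize(), i.e. both
 * 64KB pages of the programming interface; the handlers above mask bit 16
 * off the offset, making page 1 an exact alias of page 0.
 */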

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                       IOMMUNotifierFlag old,
                                       IOMMUNotifierFlag new)
{
    if (old == IOMMU_NOTIFIER_NONE) {
        warn_report("SMMUV3 does not support vhost/vfio integration yet: "
                    "devices of those types will not function properly");
    }
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name = TYPE_ARM_SMMUV3,
    .parent = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size = sizeof(SMMUv3Class),
    .class_init = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)