/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @s: smmuv3 handle
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

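/*
 * GERROR/GERRORN implement the architecture's toggle-based acknowledge
 * protocol: an error is active while the corresponding bits of GERROR
 * and GERRORN differ. smmuv3_trigger_irq() above toggles a GERROR bit
 * to raise an error; the guest acknowledges it by toggling the matching
 * GERRORN bit, handled by smmuv3_write_gerrorn() below. Worked example
 * on a 2-bit view: with gerror = 0b00 and gerrorn = 0b00 nothing is
 * pending; raising error 1 flips gerror to 0b10, so pending =
 * gerror ^ gerrorn = 0b10; the guest then writes gerrorn = 0b10 and
 * pending drops back to 0b00.
 */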
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

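/*
 * Queue accessors. Per the SMMUv3 spec, the PROD and CONS registers hold
 * an index in the low log2size bits plus a wrap bit just above it: the
 * queue is empty when PROD == CONS including the wrap bits, and full when
 * the indexes match but the wrap bits differ. The Q_*() and
 * queue_*_incr() helpers in smmuv3-internal.h encode that convention.
 */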
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

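/*
 * A failure to push the event (queue disabled or full, or the DMA write
 * aborting) surfaces as MEMTX_ERROR; smmuv3_record_event() below turns
 * any such failure into an EVENTQ_ABT_ERR global error rather than
 * dropping it silently.
 */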
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

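/*
 * Note on the queue base registers seeded below: per the spec, bits [4:0]
 * of CMDQ_BASE and EVENTQ_BASE hold LOG2SIZE, which is why deposit64()
 * writes the queue size into the low five bits at reset. Guest writes to
 * those registers are clamped the same way in smmu_writel()/smmu_writell().
 */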
static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

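/*
 * Worked example for the 2-level lookup in smmu_find_ste() below,
 * assuming sid_split == 8 and sid == 0x1234: the L1 index is
 * sid >> 8 = 0x12 and selects an L1 descriptor whose SPAN field bounds
 * the valid entries of the L2 table; the L2 index is sid & 0xff = 0x34
 * and selects the final STE within that table.
 */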
/**
 * smmu_find_ste - Return the stream table entry associated
 * with the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range */
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "invalid sid=%d (L1STD span=0)\n", sid);
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = s->strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

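/*
 * Context Descriptor field checks below follow the architecture: TSZ is
 * a T0SZ-style field, so a translation table covers an input range of
 * 2^(64 - tsz) bytes; e.g. tsz == 25 with a 4K granule (granule_sz == 12)
 * describes a 39-bit IOVA space. Only tsz values between 16 and 39 and
 * 4K/64K granules are accepted here.
 */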
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zeroed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev and on cache miss perform a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is
 * indexed by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

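/*
 * Translate one IOVA for @mr. The flow is: take the per-state mutex, look
 * up (and possibly decode) the translation config, short-circuit the
 * disable/abort/bypass cases, then try the IOTLB keyed by (asid, iova).
 * On a miss, smmu_ptw() performs the page table walk and the resulting
 * entry is cached (the whole IOTLB is flushed once it reaches
 * SMMU_IOTLB_MAX_SIZE entries). Any failure is recorded in the event
 * queue via smmuv3_record_event() in the epilogue.
 */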
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    IOMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    SMMUIOTLBKey key, *new_key;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (event.record_trans_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    key.asid = cfg->asid;
    key.iova = aligned_addr;

    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
    if (cached_entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
                                   cfg->iotlb_hits, cfg->iotlb_misses,
                                   100 * cfg->iotlb_hits /
                                   (cfg->iotlb_hits + cfg->iotlb_misses));
        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cfg->iotlb_misses++;
    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
                                cfg->iotlb_hits, cfg->iotlb_misses,
                                100 * cfg->iotlb_hits /
                                (cfg->iotlb_hits + cfg->iotlb_misses));

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    cached_entry = g_new0(IOMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        new_key = g_new0(SMMUIOTLBKey, 1);
        new_key->asid = cfg->asid;
        new_key->iova = aligned_addr;
        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        entry.translated_addr = cached_entry->translated_addr +
                                (addr & page_mask);
        entry.addr_mask = cached_entry->addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU memory region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid,
                               dma_addr_t iova)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUEventInfo event = {};
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg;
    IOMMUTLBEntry entry;

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s error decoding the configuration for iommu mr=%s\n",
                      __func__, mr->parent_obj.name);
        return;
    }

    if (asid >= 0 && cfg->asid != asid) {
        return;
    }

    tt = select_tt(cfg, iova);
    if (!tt) {
        return;
    }

    entry.target_as = &address_space_memory;
    entry.iova = iova;
    entry.addr_mask = (1 << tt->granule_sz) - 1;
    entry.perm = IOMMU_NONE;

    memory_region_notify_one(n, &entry);
}

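/*
 * The invalidation is propagated to each notifier as an UNMAP (perm ==
 * IOMMU_NONE) covering the single granule that contains @iova, as built
 * by smmuv3_notify_iova() above.
 */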
/* invalidate an asid/iova tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova);
        }
    }
}

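/*
 * Consume commands until the queue is empty or an error occurs. On error,
 * consumption stops, the error code is written to the CMDQ CONS register
 * via smmu_write_cmdq_err() and GERROR.CMDQ_ERR is toggled; the guest
 * acknowledges through GERRORN (see smmu_writel()), which re-triggers
 * consumption. As an example of the range encoding used by
 * SMMU_CMD_CFGI_STE_RANGE, end = start + 2^(range + 1) - 1, so range == 0
 * invalidates 2 StreamIDs and larger values cover correspondingly larger
 * spans.
 */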
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t start = CMD_SID(&cmd), end, i;
            uint8_t range = CMD_STE_RANGE(&cmd);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            end = start + (1 << (range + 1)) - 1;
            trace_smmuv3_cmdq_cfgi_ste_range(start, end);

            for (i = start; i <= end; i++) {
                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
                SMMUDevice *sdev;

                if (!mr) {
                    continue;
                }
                sdev = container_of(mr, SMMUDevice, iommu);
                smmuv3_flush_config(sdev);
            }
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        {
            dma_addr_t addr = CMD_ADDR(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
            smmuv3_inv_notifiers_iova(bs, -1, addr);
            smmu_iotlb_inv_all(bs);
            break;
        }
        case SMMU_CMD_TLBI_NH_VA:
        {
            uint16_t asid = CMD_ASID(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);
            dma_addr_t addr = CMD_ADDR(&cmd);
            bool leaf = CMD_LEAF(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
            smmuv3_inv_notifiers_iova(bs, asid, addr);
            smmu_iotlb_inv_iova(bs, asid, addr);
            break;
        }
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

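/*
 * MMIO register accessors. 64-bit registers (e.g. STRTAB_BASE, the queue
 * bases and the IRQ_CFG0 registers) accept both a single 64-bit access,
 * handled in smmu_writell()/smmu_readll(), and a pair of 32-bit accesses
 * to the low and high halves, handled by the "A_FOO" and "A_FOO + 4"
 * cases in smmu_writel()/smmu_readl().
 */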
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify that commands can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

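/*
 * MAP notifiers are not supported by this model, so only a warning is
 * issued when one is registered; clients that rely on MAP notifications
 * (VFIO passthrough devices, for instance) will not function properly
 * behind this SMMU. UNMAP-only notifiers are tracked on the
 * devices_with_notifiers list used by the invalidation paths above.
 */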
static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                       IOMMUNotifierFlag old,
                                       IOMMUNotifierFlag new)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_MAP) {
        int bus_num = pci_bus_num(sdev->bus);
        PCIDevice *pcidev = pci_find_device(sdev->bus, bus_num, sdev->devfn);

        warn_report("SMMUv3 does not support notification on MAP: "
                    "device %s will not function properly", pcidev->name);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)