/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
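        /*
         * GERROR and GERRORN form a toggle pair: an error bit is pending
         * while the two registers disagree at that position, until the
         * guest acknowledges it by toggling GERRORN back.
         */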
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

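/*
 * Queue helpers: the command and event queues are circular buffers in
 * guest memory; Q_CONS_ENTRY/Q_PROD_ENTRY yield the guest address of the
 * entry at the current consumer/producer index.
 */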
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

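    /* a successful write leaves the queue non-empty: notify the guest */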
    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range: valid SIDs are 0 .. 2^SIDSIZE - 1 */
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

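        /*
         * A 2-level table splits the SID: the high bits index the L1
         * descriptor array and the low sid_split bits select an entry
         * in the L2 array that descriptor points to.
         */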
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "invalid sid=%d (L1STD span=0)\n", sid);
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = s->strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
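    /*
     * A CD provides two translation tables, TT0 and TT1, covering the
     * lower and upper halves of the VA space (TTBR0/TTBR1 style split).
     */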
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

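        /* tsz encodes an input address size of 64 - tsz bits, i.e. 25 to 48 */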
        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up for a cached copy of configuration data for
 * @sdev and on cache miss performs a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
            100 * sdev->cfg_cache_hits /
            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
            100 * sdev->cfg_cache_hits /
            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUTransCfg *cfg = NULL;
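    /*
     * Default to a faulting entry covering the whole address space; it is
     * only populated in the epilogue when translation succeeds.
     */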
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    if (smmu_ptw(cfg, addr, flag, &entry, &ptw_info)) {
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64"(%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t start = CMD_SID(&cmd), end, i;
            uint8_t range = CMD_STE_RANGE(&cmd);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

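            /* the range field encodes 2^(range + 1) consecutive StreamIDs */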
            end = start + (1 << (range + 1)) - 1;
            trace_smmuv3_cmdq_cfgi_ste_range(start, end);

            for (i = start; i <= end; i++) {
                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
                SMMUDevice *sdev;

                if (!mr) {
                    continue;
                }
                sdev = container_of(mr, SMMUDevice, iommu);
                smmuv3_flush_config(sdev);
            }
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NH_ASID:
        case SMMU_CMD_TLBI_NH_VA:
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_TLBI_NSNH_ALL:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
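        /* CR0ACK mirrors the accepted value so the guest sees the update complete */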
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

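    /*
     * 0x20000 covers the two 64K register pages; the MMIO handlers alias
     * page1 accesses onto page0.
     */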
    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                       IOMMUNotifierFlag old,
                                       IOMMUNotifierFlag new)
{
    if (old == IOMMU_NOTIFIER_NONE) {
        warn_report("SMMUV3 does not support vhost/vfio integration yet: "
                    "devices of those types will not function properly");
    }
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name = TYPE_ARM_SMMUV3,
    .parent = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size = sizeof(SMMUv3Class),
    .class_init = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)