/*
 * QEMU emulation of AMD IOMMU (AMD-Vi)
 *
 * Copyright (C) 2011 Eduard - Gabriel Munteanu
 * Copyright (C) 2015 David Kiarie, <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Cache implementation inspired by hw/i386/intel_iommu.c
 */
#include "qemu/osdep.h"
#include "hw/i386/amd_iommu.h"
#include "qemu/error-report.h"
#include "trace.h"

/* used AMD-Vi MMIO registers */
const char *amdvi_mmio_low[] = {
    "AMDVI_MMIO_DEVTAB_BASE",
    "AMDVI_MMIO_CMDBUF_BASE",
    "AMDVI_MMIO_EVTLOG_BASE",
    "AMDVI_MMIO_CONTROL",
    "AMDVI_MMIO_EXCL_BASE",
    "AMDVI_MMIO_EXCL_LIMIT",
    "AMDVI_MMIO_EXT_FEATURES",
    "AMDVI_MMIO_PPR_BASE",
    "UNHANDLED"
};
const char *amdvi_mmio_high[] = {
    "AMDVI_MMIO_COMMAND_HEAD",
    "AMDVI_MMIO_COMMAND_TAIL",
    "AMDVI_MMIO_EVTLOG_HEAD",
    "AMDVI_MMIO_EVTLOG_TAIL",
    "AMDVI_MMIO_STATUS",
    "AMDVI_MMIO_PPR_HEAD",
    "AMDVI_MMIO_PPR_TAIL",
    "UNHANDLED"
};

struct AMDVIAddressSpace {
    uint8_t bus_num;            /* bus number                           */
    uint8_t devfn;              /* device function                      */
    AMDVIState *iommu_state;    /* AMDVI - one per machine              */
    MemoryRegion iommu;         /* Device's address translation region  */
    MemoryRegion iommu_ir;      /* Device's interrupt remapping region  */
    AddressSpace as;            /* device's corresponding address space */
};

/* AMDVI cache entry */
typedef struct AMDVIIOTLBEntry {
    uint16_t domid;             /* assigned domain id   */
    uint16_t devid;             /* device owning entry  */
    uint64_t perms;             /* access permissions   */
    uint64_t translated_addr;   /* translated address   */
    uint64_t page_mask;         /* physical page size   */
} AMDVIIOTLBEntry;

/* configure MMIO registers at startup/reset */
static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
                           uint64_t romask, uint64_t w1cmask)
{
    stq_le_p(&s->mmior[addr], val);
    stq_le_p(&s->romask[addr], romask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
{
    return lduw_le_p(&s->mmior[addr]);
}

static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}

static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
{
    return ldq_le_p(&s->mmior[addr]);
}

/* internal write */
static void amdvi_writeq_raw(AMDVIState *s, uint64_t val, hwaddr addr)
{
    stq_le_p(&s->mmior[addr], val);
}

/* external write */
static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
{
    uint16_t romask = lduw_le_p(&s->romask[addr]);
    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
    stw_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
{
    uint32_t romask = ldl_le_p(&s->romask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
    stl_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    uint64_t romask = ldq_le_p(&s->romask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
    stq_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

/* test whether any of the given bits are set in a 64-bit register */
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
{
    return amdvi_readq(s, addr) & val;
}

/* OR a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) | val);
}

/* AND a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) & val);
}

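/*
 * Raise the IOMMU's own MSI, provided the guest has enabled MSI on the
 * AMD-Vi PCI function.  The message is written straight through
 * address_space_memory with the IOMMU's requester id, so it is not
 * translated by the IOMMU itself.
 */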
static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg = {};
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}

static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
    /* event logging not enabled */
    if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
        AMDVI_MMIO_STATUS_EVT_OVF)) {
        return;
    }

    /* event log buffer full */
    if (s->evtlog_tail >= s->evtlog_len) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
        return;
    }

    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
                         evt, AMDVI_EVENT_LEN)) {
        trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
    }

    s->evtlog_tail += AMDVI_EVENT_LEN;
    amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
    amdvi_generate_msi_interrupt(s);
}

static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
                                int length)
{
    int index = start / 64, bitpos = start % 64;
    uint64_t mask = MAKE_64BIT_MASK(start, length);
    buffer[index] &= ~mask;
    buffer[index] |= (value << bitpos) & mask;
}
/*
 * AMDVi event structure
 *    0:15   -> DeviceID
 *    55:63  -> event type + miscellaneous info
 *    63:127 -> related address
 */
static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr,
                               uint16_t info)
{
    amdvi_setevent_bits(evt, devid, 0, 16);
    amdvi_setevent_bits(evt, info, 55, 8);
    amdvi_setevent_bits(evt, addr, 63, 64);
}
/* log an error encountered during a page walk
 *
 * @addr: virtual address in translation request
 */
static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
                             hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/*
 * log a master abort accessing device table
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
                                   hwaddr devtab, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;

    amdvi_encode_event(evt, devid, devtab, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an event trying to access command buffer
 *   @addr : address that couldn't be accessed
 */
static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
{
    uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR;

    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an illegal command event
 *   @addr : address of illegal command
 */
static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
                                       hwaddr addr)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing device table
 *
 *  @devid : device owning the table entry
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
                                          hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing a PTE entry
 * @addr : address that couldn't be accessed
 */
static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
                                    hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}

static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint amdvi_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
                                           uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    return g_hash_table_lookup(s->iotlb, &key);
}

static void amdvi_iotlb_reset(AMDVIState *s)
{
    assert(s->iotlb);
    trace_amdvi_iotlb_reset();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t devid = *(uint16_t *)user_data;
    return entry->devid == devid;
}

static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
                                    uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    g_hash_table_remove(s->iotlb, &key);
}

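/*
 * Cache a completed translation in the emulated IOTLB.  Entries are keyed
 * by guest frame number and device id; the whole table is flushed once it
 * grows past AMDVI_IOTLB_MAX_SIZE entries.
 */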
static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
                               uint64_t gpa, IOMMUTLBEntry to_cache,
                               uint16_t domid)
{
    AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
    uint64_t *key = g_new(uint64_t, 1);
    uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

    /* don't cache erroneous translations */
    if (to_cache.perm != IOMMU_NONE) {
        trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), gpa, to_cache.translated_addr);

        if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
            amdvi_iotlb_reset(s);
        }

        entry->domid = domid;
        entry->perms = to_cache.perm;
        entry->translated_addr = to_cache.translated_addr;
        entry->page_mask = to_cache.addr_mask;
        *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
        g_hash_table_replace(s->iotlb, key, entry);
    }
}

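/*
 * COMPLETION_WAIT command: optionally store the 64-bit completion value to
 * the (8-byte aligned) address encoded in the command, and optionally set
 * the completion interrupt status bit and raise an MSI.
 */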
static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
{
    /* pad the last 3 bits */
    hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
    uint64_t data = cpu_to_le64(cmd[1]);

    if (extract64(cmd[0], 51, 8)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    if (extract64(cmd[0], 0, 1)) {
        if (dma_memory_write(&address_space_memory, addr, &data,
            AMDVI_COMPLETION_DATA_SIZE)) {
            trace_amdvi_completion_wait_fail(addr);
        }
    }
    /* set completion interrupt */
    if (extract64(cmd[0], 1, 1)) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
    }
    trace_amdvi_completion_wait(addr, data);
}

/* log error without aborting since linux seems to be using reserved bits */
static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));

    /* This command should invalidate internal caches, of which there are none */
    if (extract64(cmd[0], 15, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                             PCI_FUNC(devid));
}

static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
        extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29) ||
        extract64(cmd[1], 47, 16)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_ppr_exec();
}

static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 0, 60) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    amdvi_iotlb_reset(s);
    trace_amdvi_all_inval();
}

static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t domid = *(uint16_t *)user_data;
    return entry->domid == domid;
}

/* we don't have devid - we can't remove pages by address */
static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
    uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));

    if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
        extract64(cmd[0], 3, 10)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
                                &domid);
    trace_amdvi_pages_inval(domid);
}

static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
        extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
        extract64(cmd[1], 5, 7)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    trace_amdvi_prefetch_pages();
}

static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    trace_amdvi_intr_inval();
}

/* FIXME: Try to work with the specified size instead of all the pages
 * when the S bit is on
 */
static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = extract64(cmd[0], 0, 16);
    if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    if (extract64(cmd[1], 0, 1)) {
        g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
                                    &devid);
    } else {
        amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
                                cpu_to_le16(extract64(cmd[1], 0, 16)));
    }
    trace_amdvi_iotlb_inval();
}

/* not honouring reserved bits is regarded as an illegal command */
static void amdvi_cmdbuf_exec(AMDVIState *s)
{
    uint64_t cmd[2];

    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
        cmd, AMDVI_COMMAND_SIZE)) {
        trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
        amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
        return;
    }

    switch (extract64(cmd[0], 60, 4)) {
    case AMDVI_CMD_COMPLETION_WAIT:
        amdvi_completion_wait(s, cmd);
        break;
    case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
        amdvi_inval_devtab_entry(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_PAGES:
        amdvi_inval_pages(s, cmd);
        break;
    case AMDVI_CMD_INVAL_IOTLB_PAGES:
        iommu_inval_iotlb(s, cmd);
        break;
    case AMDVI_CMD_INVAL_INTR_TABLE:
        amdvi_inval_inttable(s, cmd);
        break;
    case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
        amdvi_prefetch_pages(s, cmd);
        break;
    case AMDVI_CMD_COMPLETE_PPR_REQUEST:
        amdvi_complete_ppr(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_ALL:
        amdvi_inval_all(s, cmd);
        break;
    default:
        trace_amdvi_unhandled_command(extract64(cmd[1], 60, 4));
        /* log illegal command */
        amdvi_log_illegalcom_error(s, extract64(cmd[1], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
}

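/*
 * Drain the command buffer: execute commands one at a time from the head
 * pointer towards the tail pointer, updating the COMMAND_HEAD register
 * (with wrap-around) as each command completes.
 */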
static void amdvi_cmdbuf_run(AMDVIState *s)
{
    if (!s->cmdbuf_enabled) {
        trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL));
        return;
    }

    /* check if there is work to do. */
    while (s->cmdbuf_head != s->cmdbuf_tail) {
        trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
        amdvi_cmdbuf_exec(s);
        s->cmdbuf_head += AMDVI_COMMAND_SIZE;
        amdvi_writeq_raw(s, s->cmdbuf_head, AMDVI_MMIO_COMMAND_HEAD);

        /* wrap head pointer */
        if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
            s->cmdbuf_head = 0;
        }
    }
}

static void amdvi_mmio_trace(hwaddr addr, unsigned size)
{
    uint8_t index = (addr & ~0x2000) / 8;

    if ((addr & 0x2000)) {
        /* high table */
        index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
        trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
    } else {
        index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
        trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
    }
}

static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    AMDVIState *s = opaque;

    uint64_t val = -1;
    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_read("error: addr outside region: max ",
                (uint64_t)AMDVI_MMIO_SIZE, addr, size);
        return (uint64_t)-1;
    }

    if (size == 2) {
        val = amdvi_readw(s, addr);
    } else if (size == 4) {
        val = amdvi_readl(s, addr);
    } else if (size == 8) {
        val = amdvi_readq(s, addr);
    }
    amdvi_mmio_trace(addr, size);

    return val;
}

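/*
 * React to a write to the CONTROL register: recompute the cached enable
 * flags, mirror the command buffer / event log run state into the STATUS
 * register, and kick command buffer processing.
 */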
static void amdvi_handle_control_write(AMDVIState *s)
{
    unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
    s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);

    s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
    s->evtlog_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_EVENTLOGEN);

    s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
    s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
    s->cmdbuf_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_CMDBUFLEN);

    /* update the flags depending on the control register */
    if (s->cmdbuf_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN);
    }
    if (s->evtlog_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN);
    }

    trace_amdvi_control_status(control);
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_devtab_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
    s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);

    /* set device table length */
    s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1) *
                    (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
                     AMDVI_MMIO_DEVTAB_ENTRY_SIZE);
}

static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
{
    s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD)
                     & AMDVI_MMIO_CMDBUF_HEAD_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
{
    s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE)
                & AMDVI_MMIO_CMDBUF_BASE_MASK;
    s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE)
                    & AMDVI_MMIO_CMDBUF_SIZE_MASK);
    s->cmdbuf_head = s->cmdbuf_tail = 0;
}

static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
{
    s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL)
                     & AMDVI_MMIO_CMDBUF_TAIL_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_excllim_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
    s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) |
                    AMDVI_MMIO_EXCL_LIMIT_LOW;
}

static inline void amdvi_handle_evtbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
    s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
    s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
                    & AMDVI_MMIO_EVTLOG_SIZE_MASK);
}

static inline void amdvi_handle_evttail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL);
    s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK;
}

static inline void amdvi_handle_evthead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD);
    s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
    s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK;
    s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE)
                    & AMDVI_MMIO_PPRLOG_SIZE_MASK);
}

static inline void amdvi_handle_pprhead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD);
    s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprtail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL);
    s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK;
}

/* FIXME: something might go wrong if System Software writes in chunks
 * of one byte. Linux writes in chunks of 4 bytes, so this currently works
 * correctly with Linux, but it will definitely be broken if software
 * reads/writes 8 bytes.
 */
static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val,
                                 hwaddr addr)
{
    if (size == 2) {
        amdvi_writew(s, addr, val);
    } else if (size == 4) {
        amdvi_writel(s, addr, val);
    } else if (size == 8) {
        amdvi_writeq(s, addr, val);
    }
}

static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    AMDVIState *s = opaque;
    unsigned long offset = addr & 0x07;

    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_write("error: addr outside region: max ",
                (uint64_t)AMDVI_MMIO_SIZE, size, val, offset);
        return;
    }

    amdvi_mmio_trace(addr, size);
    switch (addr & ~0x07) {
    case AMDVI_MMIO_CONTROL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_control_write(s);
        break;
    case AMDVI_MMIO_DEVICE_TABLE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* set device table address
         * This also suffers from inability to tell whether software
         * is done writing
         */
        if (offset || (size == 8)) {
            amdvi_handle_devtab_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdhead_write(s);
        break;
    case AMDVI_MMIO_COMMAND_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* FIXME - make sure System Software has finished writing, in case
         * it writes in chunks of less than 8 bytes, in a robust way. For
         * now, this hack works for the Linux driver.
         */
        if (offset || (size == 8)) {
            amdvi_handle_cmdbase_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdtail_write(s);
        break;
    case AMDVI_MMIO_EVENT_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evtbase_write(s);
        break;
    case AMDVI_MMIO_EVENT_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evthead_write(s);
        break;
    case AMDVI_MMIO_EVENT_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evttail_write(s);
        break;
    case AMDVI_MMIO_EXCL_LIMIT:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_excllim_write(s);
        break;
    /* PPR log base - unused for now */
    case AMDVI_MMIO_PPR_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprbase_write(s);
        break;
    /* PPR log head - also unused for now */
    case AMDVI_MMIO_PPR_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprhead_write(s);
        break;
    /* PPR log tail - unused for now */
    case AMDVI_MMIO_PPR_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprtail_write(s);
        break;
    }
}

static inline uint64_t amdvi_get_perms(uint64_t entry)
{
    return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
           AMDVI_DEV_PERM_SHIFT;
}

/* a valid entry should have V = 1 and reserved bits honoured */
static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
                               uint64_t *dte)
{
    if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
        || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
        || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
        amdvi_log_illegaldevtab_error(s, devid,
                                      s->devtab +
                                      devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
        return false;
    }

    return dte[0] & AMDVI_DEV_VALID;
}

/* get a device table entry given the devid */
static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
{
    uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;

    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
        AMDVI_DEVTAB_ENTRY_SIZE)) {
        trace_amdvi_dte_get_fail(s->devtab, offset);
        /* log error accessing dte */
        amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
        return false;
    }

    *entry = le64_to_cpu(*entry);
    if (!amdvi_validate_dte(s, devid, entry)) {
        trace_amdvi_invalid_dte(entry[0]);
        return false;
    }

    return true;
}

/* get pte translation mode */
static inline uint8_t get_pte_translation_mode(uint64_t pte)
{
    return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
}

static inline uint64_t pte_override_page_mask(uint64_t pte)
{
    uint8_t page_mask = 12;
    uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) ^ AMDVI_DEV_PT_ROOT_MASK;
    /* find the first zero bit */
    while (addr & 1) {
        page_mask++;
        addr = addr >> 1;
    }

    return ~((1ULL << page_mask) - 1);
}

static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
{
    return ~((1UL << ((oldlevel * 9) + 3)) - 1);
}

static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
                                           uint16_t devid)
{
    uint64_t pte;

    if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
        trace_amdvi_get_pte_hwerror(pte_addr);
        amdvi_log_pagetab_error(s, devid, pte_addr, 0);
        pte = 0;
        return pte;
    }

    pte = le64_to_cpu(pte);
    return pte;
}

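/*
 * Walk the I/O page table rooted at the device table entry and fill @ret
 * with the translation for @addr.  On a fault @ret is left untouched
 * (i.e. IOMMU_NONE as initialised by the caller); if the DTE does not
 * enable translation the address is passed through with the DTE
 * permissions.
 */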
static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
                            IOMMUTLBEntry *ret, unsigned perms,
                            hwaddr addr)
{
    unsigned level, present, pte_perms, oldlevel;
    uint64_t pte = dte[0], pte_addr, page_mask;

    /* make sure the DTE has TV = 1 */
    if (pte & AMDVI_DEV_TRANSLATION_VALID) {
        level = get_pte_translation_mode(pte);
        if (level >= 7) {
            trace_amdvi_mode_invalid(level, addr);
            return;
        }
        if (level == 0) {
            goto no_remap;
        }

        /* we are at the leaf page table or page table encodes a huge page */
        while (level > 0) {
            pte_perms = amdvi_get_perms(pte);
            present = pte & 1;
            if (!present || perms != (perms & pte_perms)) {
                amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
                trace_amdvi_page_fault(addr);
                return;
            }

            /* go to the next lower level */
            pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
            /* add offset and load pte */
            pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
            pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn);
            if (!pte) {
                return;
            }
            oldlevel = level;
            level = get_pte_translation_mode(pte);
            if (level == 0x7) {
                break;
            }
        }

        if (level == 0x7) {
            page_mask = pte_override_page_mask(pte);
        } else {
            page_mask = pte_get_page_mask(oldlevel);
        }

        /* get access permissions from pte */
        ret->iova = addr & page_mask;
        ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask;
        ret->addr_mask = ~page_mask;
        ret->perm = amdvi_get_perms(pte);
        return;
    }
no_remap:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = amdvi_get_perms(pte);
}

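/*
 * Translate @addr for the device behind @as: consult the emulated IOTLB
 * first, otherwise fetch the device table entry, walk the page table and
 * cache the result.  Devices whose DTE cannot be read or validated get an
 * identity mapping with full RW permissions.
 */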
static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
                               bool is_write, IOMMUTLBEntry *ret)
{
    AMDVIState *s = as->iommu_state;
    uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn);
    AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid);
    uint64_t entry[4];

    if (iotlb_entry) {
        trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
        ret->iova = addr & ~iotlb_entry->page_mask;
        ret->translated_addr = iotlb_entry->translated_addr;
        ret->addr_mask = iotlb_entry->page_mask;
        ret->perm = iotlb_entry->perms;
        return;
    }

    /* devices with V = 0 are not translated */
    if (!amdvi_get_dte(s, devid, entry)) {
        goto out;
    }

    amdvi_page_walk(as, entry, ret,
                    is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);

    amdvi_update_iotlb(s, devid, addr, *ret,
                       entry[1] & AMDVI_DEV_DOMID_ID_MASK);
    return;

out:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = IOMMU_RW;
}

static inline bool amdvi_is_interrupt_addr(hwaddr addr)
{
    return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST;
}

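/*
 * MemoryRegion IOMMU translate callback.  With the IOMMU disabled,
 * addresses are passed through 1:1 with full permissions; the interrupt
 * address window is always passed through as write-only.
 */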
static IOMMUTLBEntry amdvi_translate(MemoryRegion *iommu, hwaddr addr,
                                     bool is_write)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
    AMDVIState *s = as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE
    };

    if (!s->enabled) {
        /* AMDVI disabled - corresponds to iommu=off, not
         * a failure to provide any parameter
         */
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    } else if (amdvi_is_interrupt_addr(addr)) {
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_WO;
        return ret;
    }

    amdvi_do_translate(as, addr, is_write, &ret);
    trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn),
            PCI_FUNC(as->devfn), addr, ret.translated_addr);
    return ret;
}

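/*
 * pci_setup_iommu() callback: return (lazily creating) the AddressSpace
 * used for DMA by the device at @devfn on @bus.
 */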
static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    AMDVIState *s = opaque;
    AMDVIAddressSpace **iommu_as;
    int bus_num = pci_bus_num(bus);

    iommu_as = s->address_spaces[bus_num];

    /* allocate memory during the first run */
    if (!iommu_as) {
        iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = iommu_as;
    }

    /* set up AMD-Vi region */
    if (!iommu_as[devfn]) {
        iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
        iommu_as[devfn]->devfn = (uint8_t)devfn;
        iommu_as[devfn]->iommu_state = s;

        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
                           "amd-iommu");
    }
    return &iommu_as[devfn]->as;
}

static const MemoryRegionOps mmio_mem_ops = {
    .read = amdvi_mmio_read,
    .write = amdvi_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    }
};

static void amdvi_iommu_notify_flag_changed(MemoryRegion *iommu,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_report("device %02x.%02x.%x requires iommu notifier which is not "
                     "currently supported", as->bus_num, PCI_SLOT(as->devfn),
                     PCI_FUNC(as->devfn));
        exit(1);
    }
}

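/*
 * Set up the default (reset) state: clear the cached configuration, reset
 * the MMIO register file, and program the read-only PCI configuration
 * space (vendor/device id, class and the AMD-Vi capability block).
 */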
static void amdvi_init(AMDVIState *s)
{
    amdvi_iotlb_reset(s);

    s->iommu_ops.translate = amdvi_translate;
    s->iommu_ops.notify_flag_changed = amdvi_iommu_notify_flag_changed;
    s->devtab_len = 0;
    s->cmdbuf_len = 0;
    s->cmdbuf_head = 0;
    s->cmdbuf_tail = 0;
    s->evtlog_head = 0;
    s->evtlog_tail = 0;
    s->excl_enabled = false;
    s->excl_allow = false;
    s->mmio_enabled = false;
    s->enabled = false;
    s->ats_enabled = false;
    s->cmdbuf_enabled = false;

    /* reset MMIO */
    memset(s->mmior, 0, AMDVI_MMIO_SIZE);
    amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
                   0xffffffffffffffef, 0);
    amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);

    /* reset device ident */
    pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD);
    pci_config_set_prog_interface(s->pci.dev.config, 00);
    pci_config_set_device_id(s->pci.dev.config, s->devid);
    pci_config_set_class(s->pci.dev.config, 0x0806);

    /* reset AMDVI specific capabilities, all r/o */
    pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
                 s->mmio.addr & ~(0xffff0000));
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
                 (s->mmio.addr & ~(0xffff)) >> 16);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE,
                 0xff000000);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC,
                 AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
}

static void amdvi_reset(DeviceState *dev)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);

    msi_reset(&s->pci.dev);
    amdvi_init(s);
}

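/*
 * Realize: create the IOTLB hash table, realize the companion PCI
 * function, add the SEC/MSI/HT capabilities, map the MMIO region at
 * AMDVI_BASE_ADDR and register as the IOMMU for the PCI bus.
 */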
static void amdvi_realize(DeviceState *dev, Error **err)
{
    int ret = 0;
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
    PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus;
    s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
                                     amdvi_uint64_equal, g_free, g_free);

    /* This device should take care of IOMMU PCI properties */
    x86_iommu->type = TYPE_AMD;
    qdev_set_parent_bus(DEVICE(&s->pci), &bus->qbus);
    object_property_set_bool(OBJECT(&s->pci), true, "realized", err);
    s->capab_offset = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0,
                                         AMDVI_CAPAB_SIZE);
    assert(s->capab_offset > 0);
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0, AMDVI_CAPAB_REG_SIZE);
    assert(ret > 0);
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0, AMDVI_CAPAB_REG_SIZE);
    assert(ret > 0);

    /* set up MMIO */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
                          AMDVI_MMIO_SIZE);

    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
    pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
    s->devid = object_property_get_int(OBJECT(&s->pci), "addr", err);
    msi_init(&s->pci.dev, 0, 1, true, false, err);
    amdvi_init(s);
}

1170 | .name = "amd-iommu", | |
1171 | .unmigratable = 1 | |
1172 | }; | |
1173 | ||
1174 | static void amdvi_instance_init(Object *klass) | |
1175 | { | |
1176 | AMDVIState *s = AMD_IOMMU_DEVICE(klass); | |
1177 | ||
1178 | object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI); | |
1179 | } | |
1180 | ||
1181 | static void amdvi_class_init(ObjectClass *klass, void* data) | |
1182 | { | |
1183 | DeviceClass *dc = DEVICE_CLASS(klass); | |
1184 | X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass); | |
1185 | ||
1186 | dc->reset = amdvi_reset; | |
1187 | dc->vmsd = &vmstate_amdvi; | |
1188 | dc->hotpluggable = false; | |
1189 | dc_class->realize = amdvi_realize; | |
1190 | } | |
1191 | ||
1192 | static const TypeInfo amdvi = { | |
1193 | .name = TYPE_AMD_IOMMU_DEVICE, | |
1194 | .parent = TYPE_X86_IOMMU_DEVICE, | |
1195 | .instance_size = sizeof(AMDVIState), | |
1196 | .instance_init = amdvi_instance_init, | |
1197 | .class_init = amdvi_class_init | |
1198 | }; | |
1199 | ||
1200 | static const TypeInfo amdviPCI = { | |
1201 | .name = "AMDVI-PCI", | |
1202 | .parent = TYPE_PCI_DEVICE, | |
1203 | .instance_size = sizeof(AMDVIPCIState), | |
1204 | }; | |
1205 | ||
1206 | static void amdviPCI_register_types(void) | |
1207 | { | |
1208 | type_register_static(&amdviPCI); | |
1209 | type_register_static(&amdvi); | |
1210 | } | |
1211 | ||
1212 | type_init(amdviPCI_register_types); |