/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/hw.h"
#include "qemu/log.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>
enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))
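/*
 * A quick sanity check of the macro arithmetic above: for the default
 * 4 KiB IOMMU pages, page_shift is 12, so IOMMU_PAGE_SIZE(12) == 0x1000
 * and IOMMU_PAGE_MASK(12) == 0xFFFFFFFFFFFFF000, i.e. the mask keeps the
 * page-number bits of an address and drops the in-page offset bits.
 */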
static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;
sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == (uint32_t)liobn) {
            return tcet;
        }
    }

    return NULL;
}
static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
{
    switch (tce & SPAPR_TCE_RW) {
    case SPAPR_TCE_FAULT:
        return IOMMU_NONE;
    case SPAPR_TCE_WO:
        return IOMMU_WO;
    case SPAPR_TCE_RO:
        return IOMMU_RO;
    default: /* SPAPR_TCE_RW */
        return IOMMU_RW;
    }
}
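/*
 * A TCE is a 64-bit entry: the high bits carry the real page number
 * (extracted with IOMMU_PAGE_MASK()) and the two low bits carry the
 * permissions decoded above. For example, with 4 KiB pages, a read-write
 * mapping to real address 0x2000 is the TCE value
 * 0x2000 | SPAPR_TCE_RW == 0x2003.
 */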
static uint64_t *spapr_tce_alloc_table(uint32_t liobn,
                                       uint32_t page_shift,
                                       uint64_t bus_offset,
                                       uint32_t nb_table,
                                       int *fd,
                                       bool need_vfio)
{
    uint64_t *table = NULL;

    if (kvm_enabled() && !need_vfio) {
        table = kvmppc_create_spapr_tce(liobn, page_shift, bus_offset, nb_table,
                                        fd, need_vfio);
    }

    if (!table) {
        *fd = -1;
        table = g_malloc0(nb_table * sizeof(uint64_t));
    }

    trace_spapr_iommu_new_table(liobn, table, *fd);

    return table;
}
static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
{
    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) {
        g_free(table);
    }
}
/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
                                               IOMMUAccessFlags flag)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* The access is within bounds: translate it via the TCE table */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = spapr_tce_iommu_access_flags(tce);
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}
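/*
 * Worked example of the translation above, using made-up values: with
 * page_shift == 12 and a device access at window-relative address 0x5123,
 * the index is 0x5123 >> 12 == 5; if table[5] holds 0x2003, the result is
 * translated_addr 0x2000, addr_mask 0xFFF and perm IOMMU_RW, so the access
 * lands on real address 0x2123. Note that addr is already relative to the
 * window, because the IOMMU region is mapped at bus_offset inside the
 * root region.
 */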
static void spapr_tce_table_pre_save(void *opaque)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    tcet->mig_table = tcet->table;
    tcet->mig_nb_table = tcet->nb_table;

    trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table,
                               tcet->bus_offset, tcet->page_shift);
}
static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);

    return 1ULL << tcet->page_shift;
}
static void spapr_tce_notify_flag_changed(MemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    struct sPAPRTCETable *tbl = container_of(iommu, sPAPRTCETable, iommu);

    if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, true);
    } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, false);
    }
}
static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
    uint32_t old_nb_table = tcet->nb_table;
    uint64_t old_bus_offset = tcet->bus_offset;
    uint32_t old_page_shift = tcet->page_shift;

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    if (tcet->mig_nb_table != tcet->nb_table) {
        spapr_tce_table_disable(tcet);
    }

    if (tcet->mig_nb_table) {
        if (!tcet->nb_table) {
            spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset,
                                   tcet->mig_nb_table);
        }

        memcpy(tcet->table, tcet->mig_table,
               tcet->nb_table * sizeof(tcet->table[0]));

        g_free(tcet->mig_table);
        tcet->mig_table = NULL;
    }

    trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table,
                                tcet->bus_offset, tcet->page_shift);

    return 0;
}
static bool spapr_tce_table_ex_needed(void *opaque)
{
    sPAPRTCETable *tcet = opaque;

    return tcet->bus_offset || tcet->page_shift != 0xC;
}
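/*
 * The "spapr_iommu_ex" subsection below is only put on the wire when the
 * table deviates from the historical defaults (bus offset 0, 4 KiB pages,
 * i.e. page_shift == 0xC), which keeps the migration stream compatible
 * with older QEMU versions in the common case.
 */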
static const VMStateDescription vmstate_spapr_tce_table_ex = {
    .name = "spapr_iommu_ex",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_tce_table_ex_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(bus_offset, sPAPRTCETable),
        VMSTATE_UINT32(page_shift, sPAPRTCETable),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_tce_table_pre_save,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField[]) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable, NULL),

        /* IOMMU state */
        VMSTATE_UINT32(mig_nb_table, sPAPRTCETable),
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32_ALLOC(mig_table, sPAPRTCETable, mig_nb_table, 0,
                                    vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_spapr_tce_table_ex,
        NULL
    }
};
static MemoryRegionIOMMUOps spapr_iommu_ops = {
    .translate = spapr_tce_translate_iommu,
    .get_min_page_size = spapr_tce_get_min_page_size,
    .notify_flag_changed = spapr_tce_notify_flag_changed,
};
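/*
 * These callbacks plug the table into the memory API: translate() serves
 * emulated device DMA, while get_min_page_size() and notify_flag_changed()
 * serve users such as VFIO that need to mirror the guest's mappings.
 */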
static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    Object *tcetobj = OBJECT(tcet);
    char tmp[32];

    tcet->fd = -1;
    tcet->need_vfio = false;
    snprintf(tmp, sizeof(tmp), "tce-root-%x", tcet->liobn);
    memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX);

    snprintf(tmp, sizeof(tmp), "tce-iommu-%x", tcet->liobn);
    memory_region_init_iommu(&tcet->iommu, tcetobj, &spapr_iommu_ops, tmp, 0);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);

    return 0;
}
void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
{
    size_t table_size = tcet->nb_table * sizeof(uint64_t);
    uint64_t *newtable;

    if (need_vfio == tcet->need_vfio) {
        /* Nothing to do */
        return;
    }

    if (!need_vfio) {
        /* FIXME: We don't support transition back to KVM accelerated
         * tables yet */
        return;
    }

    tcet->need_vfio = true;

    if (tcet->fd < 0) {
        /* Table is already in userspace, nothing to do */
        return;
    }

    newtable = g_malloc(table_size);
    memcpy(newtable, tcet->table, table_size);

    kvmppc_remove_spapr_tce(tcet->table, tcet->fd, tcet->nb_table);

    tcet->fd = -1;
    tcet->table = newtable;
}
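/*
 * The switch above is one-way by design: once a VFIO user needs to read
 * the table, the entries must live in ordinary userspace memory, so the
 * KVM fd-backed acceleration is torn down after copying the entries out.
 */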
sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn)
{
    sPAPRTCETable *tcet;
    char tmp[32];

    if (spapr_tce_find_by_liobn(liobn)) {
        error_report("Attempted to create TCE table with duplicate"
                     " LIOBN 0x%x", liobn);
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;

    snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn);
    object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}
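/*
 * Typical calling sequence, as a sketch (the LIOBN and window geometry
 * here are illustrative values, not anything this file mandates):
 *
 *     sPAPRTCETable *tcet = spapr_tce_new_table(DEVICE(dev), 0x80000001);
 *     if (tcet) {
 *         // 4 KiB pages at bus offset 0, 0x200 entries => a 2 MiB window
 *         spapr_tce_table_enable(tcet, 12, 0, 0x200);
 *     }
 */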
void spapr_tce_table_enable(sPAPRTCETable *tcet,
                            uint32_t page_shift, uint64_t bus_offset,
                            uint32_t nb_table)
{
    if (tcet->nb_table) {
        error_report("Warning: trying to enable an already enabled TCE table");
        return;
    }

    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->bus_offset,
                                        tcet->nb_table,
                                        &tcet->fd,
                                        tcet->need_vfio);

    memory_region_set_size(&tcet->iommu,
                           (uint64_t)tcet->nb_table << tcet->page_shift);
    memory_region_add_subregion(&tcet->root, tcet->bus_offset, &tcet->iommu);
}
void spapr_tce_table_disable(sPAPRTCETable *tcet)
{
    if (!tcet->nb_table) {
        return;
    }

    memory_region_del_subregion(&tcet->root, &tcet->iommu);
    memory_region_set_size(&tcet->iommu, 0);

    spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table);
    tcet->fd = -1;
    tcet->table = NULL;
    tcet->bus_offset = 0;
    tcet->page_shift = 0;
    tcet->nb_table = 0;
}
static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    QLIST_REMOVE(tcet, list);

    spapr_tce_table_disable(tcet);
}
MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->root;
}
static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    if (tcet->nb_table) {
        memset(tcet->table, 0, table_size);
    }
}
static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = (ioba - tcet->bus_offset) & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = spapr_tce_iommu_access_flags(tce);
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}
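/*
 * memory_region_notify_iommu() is what keeps external listeners (e.g. a
 * VFIO container or vhost) coherent with the guest: every TCE update is
 * replayed to registered IOMMUNotifiers as a map or unmap event.
 */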
static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER, tce = 0;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace the last successful entry or the first problematic one */
    i = i ? (i - 1) : 0;
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
    } else {
        trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
    }

    return ret;
}
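/*
 * H_PUT_TCE_INDIRECT register convention, as consumed above:
 *   args[0] = LIOBN, args[1] = starting IOBA,
 *   args[2] = guest real address of a page-aligned list of big-endian
 *             TCEs (at most 512 per call), args[3] = number of entries.
 */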
static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }

    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
    } else {
        trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
    }

    return ret;
}
static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
    } else {
        trace_spapr_iommu_put(liobn, ioba, tce, ret);
    }

    return ret;
}
static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}
static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
    } else {
        trace_spapr_iommu_get(liobn, ioba, ret, tce);
    }

    return ret;
}
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
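/*
 * The property built above follows the PAPR DMA window layout of five
 * 32-bit cells: <liobn window-hi window-lo size-hi size-lo>. As an
 * illustration (with a made-up LIOBN and property name), a 1 GiB window
 * at bus offset 0 would appear in the device tree as:
 *
 *     ibm,my-dma-window = <0x80000001 0x0 0x0 0x0 0x40000000>;
 */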
int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}
static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}
static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
};
static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
}

type_init(register_types);