* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
+#include "hw/pci/pci.h"
/*#define DEBUG_INTEL_IOMMU*/
#ifdef DEBUG_INTEL_IOMMU
enum {
DEBUG_GENERAL, DEBUG_CSR, DEBUG_INV, DEBUG_MMU, DEBUG_FLOG,
+ DEBUG_CACHE,
};
#define VTD_DBGBIT(x) (1 << DEBUG_##x)
static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);
return new_val;
}
+/* GHashTable functions */
+static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
+{
+ return *((const uint64_t *)v1) == *((const uint64_t *)v2);
+}
+
+static guint vtd_uint64_hash(gconstpointer v)
+{
+ return (guint)*(const uint64_t *)v;
+}
+
+static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
+ uint16_t domain_id = *(uint16_t *)user_data;
+ return entry->domain_id == domain_id;
+}
+
+/* The shift of an addr for a certain level of paging structure */
+static inline uint32_t vtd_slpt_level_shift(uint32_t level)
+{
+ return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
+}
+
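+/* Page mask for a given level.  For example (assuming VTD_PAGE_SHIFT_4K is
+ * 12 and VTD_SL_LEVEL_BITS is 9), level 1 yields a 4KiB mask, level 2 a
+ * 2MiB mask and level 3 a 1GiB mask: the VT-d second-level page sizes.
+ */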
+static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
+{
+ return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
+}
+
+static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
+ gpointer user_data)
+{
+ VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
+ VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
+ uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
+ uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
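+ /* Match entries that map the invalidated page exactly, as well as
+ * large-page entries whose range covers the invalidated address.
+ */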
+ return (entry->domain_id == info->domain_id) &&
+ (((entry->gfn & info->mask) == gfn) ||
+ (entry->gfn == gfn_tlb));
+}
+
+/* Reset the context-cache generation of every VTDAddressSpace to zero,
+ * then set the global generation of IntelIOMMUState to 1.
+ */
+static void vtd_reset_context_cache(IntelIOMMUState *s)
+{
+ VTDAddressSpace *vtd_as;
+ VTDBus *vtd_bus;
+ GHashTableIter bus_it;
+ uint32_t devfn_it;
+
+ g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);
+
+ VTD_DPRINTF(CACHE, "global context_cache_gen=1");
+ while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
+ for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
+ vtd_as = vtd_bus->dev_as[devfn_it];
+ if (!vtd_as) {
+ continue;
+ }
+ vtd_as->context_cache_entry.context_cache_gen = 0;
+ }
+ }
+ s->context_cache_gen = 1;
+}
+
+static void vtd_reset_iotlb(IntelIOMMUState *s)
+{
+ assert(s->iotlb);
+ g_hash_table_remove_all(s->iotlb);
+}
+
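+/* Build the IOTLB hash key: the gfn sits in the low bits, with source-id
+ * and level packed above it at VTD_IOTLB_SID_SHIFT and VTD_IOTLB_LVL_SHIFT
+ * (assumed to keep the three fields disjoint), so a lookup only hits when
+ * all three match.
+ */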
+static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint8_t source_id,
+ uint32_t level)
+{
+ return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
+ ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
+}
+
+static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
+{
+ return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
+}
+
+static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
+ hwaddr addr)
+{
+ VTDIOTLBEntry *entry = NULL;
+ uint64_t key;
+ int level;
+
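+ /* A translation may have been cached for a 4KiB, 2MiB or 1GiB page,
+ * each under a different key, so probe every leaf level until one
+ * hits.
+ */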
+ for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
+ key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
+ source_id, level);
+ entry = g_hash_table_lookup(s->iotlb, &key);
+ if (entry) {
+ goto out;
+ }
+ }
+
+out:
+ return entry;
+}
+
+static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
+ uint16_t domain_id, hwaddr addr, uint64_t slpte,
+ bool read_flags, bool write_flags,
+ uint32_t level)
+{
+ VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
+ uint64_t *key = g_malloc(sizeof(*key));
+ uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
+
+ VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
+ " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr, slpte,
+ domain_id);
+ if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
+ VTD_DPRINTF(CACHE, "iotlb exceeds size limit, forced to reset");
+ vtd_reset_iotlb(s);
+ }
+
+ entry->gfn = gfn;
+ entry->domain_id = domain_id;
+ entry->slpte = slpte;
+ entry->read_flags = read_flags;
+ entry->write_flags = write_flags;
+ entry->mask = vtd_slpt_level_page_mask(level);
+ *key = vtd_get_iotlb_key(gfn, source_id, level);
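+ /* The table owns both key and entry from here on: it is created with
+ * g_free() destroy functions, so g_hash_table_replace() frees any
+ * displaced duplicate automatically.
+ */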
+ g_hash_table_replace(s->iotlb, key, entry);
+}
+
/* Given the reg addr of both the message data and address, generate an
* interrupt via MSI.
*/
data = vtd_get_long_raw(s, mesg_data_reg);
VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32, addr, data);
- stl_le_phys(&address_space_memory, addr, data);
+ address_space_stl_le(&address_space_memory, addr, data,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}
/* Generate a fault event to software via MSI if conditions are met.
return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}
-/* The shift of an addr for a certain level of paging structure */
-static inline uint32_t vtd_slpt_level_shift(uint32_t level)
-{
- return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
-}
-
static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
return slpte & VTD_SL_PT_BASE_ADDR_MASK;
/* Map dev to context-entry then do a paging-structures walk to do an iommu
* translation.
+ *
+ * Called from RCU critical section.
+ *
* @bus_num: The bus number
* @devfn: The devfn, which combines the device and function numbers
* @is_write: The access is a write operation
* @entry: IOMMUTLBEntry that contain the addr to be translated and result
*/
-static void vtd_do_iommu_translate(IntelIOMMUState *s, uint8_t bus_num,
+static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
uint8_t devfn, hwaddr addr, bool is_write,
IOMMUTLBEntry *entry)
{
+ IntelIOMMUState *s = vtd_as->iommu_state;
VTDContextEntry ce;
- uint64_t slpte;
+ uint8_t bus_num = pci_bus_num(bus);
+ VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
+ uint64_t slpte, page_mask;
uint32_t level;
uint16_t source_id = vtd_make_source_id(bus_num, devfn);
int ret_fr;
bool is_fpd_set = false;
bool reads = true;
bool writes = true;
+ VTDIOTLBEntry *iotlb_entry;
/* Check if the request is in interrupt address range */
if (vtd_is_interrupt_addr(addr)) {
return;
}
}
-
- ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
- is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
- if (ret_fr) {
- ret_fr = -ret_fr;
- if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
- VTD_DPRINTF(FLOG, "fault processing is disabled for DMA requests "
- "through this context-entry (with FPD Set)");
- } else {
- vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
+ /* Try to fetch slpte from IOTLB */
+ iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
+ if (iotlb_entry) {
+ VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
+ " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr,
+ iotlb_entry->slpte, iotlb_entry->domain_id);
+ slpte = iotlb_entry->slpte;
+ reads = iotlb_entry->read_flags;
+ writes = iotlb_entry->write_flags;
+ page_mask = iotlb_entry->mask;
+ goto out;
+ }
+ /* Try to fetch context-entry from cache first */
+ if (cc_entry->context_cache_gen == s->context_cache_gen) {
+ VTD_DPRINTF(CACHE, "hit context-cache bus %d devfn %d "
+ "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 ")",
+ bus_num, devfn, cc_entry->context_entry.hi,
+ cc_entry->context_entry.lo, cc_entry->context_cache_gen);
+ ce = cc_entry->context_entry;
+ is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
+ } else {
+ ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
+ is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
+ if (ret_fr) {
+ ret_fr = -ret_fr;
+ if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
+ VTD_DPRINTF(FLOG, "fault processing is disabled for DMA "
+ "requests through this context-entry "
+ "(with FPD Set)");
+ } else {
+ vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
+ }
+ return;
}
- return;
+ /* Update context-cache */
+ VTD_DPRINTF(CACHE, "update context-cache bus %d devfn %d "
+ "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 "->%"PRIu32 ")",
+ bus_num, devfn, ce.hi, ce.lo,
+ cc_entry->context_cache_gen, s->context_cache_gen);
+ cc_entry->context_entry = ce;
+ cc_entry->context_cache_gen = s->context_cache_gen;
}
ret_fr = vtd_gpa_to_slpte(&ce, addr, is_write, &slpte, &level,
return;
}
- entry->iova = addr & VTD_PAGE_MASK_4K;
- entry->translated_addr = vtd_get_slpte_addr(slpte) & VTD_PAGE_MASK_4K;
- entry->addr_mask = ~VTD_PAGE_MASK_4K;
+ page_mask = vtd_slpt_level_page_mask(level);
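+ /* For a superpage, page_mask covers the whole 2MiB or 1GiB region, so
+ * the returned mapping describes the complete large page rather than a
+ * single 4KiB frame.
+ */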
+ vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
+ reads, writes, level);
+out:
+ entry->iova = addr & page_mask;
+ entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
+ entry->addr_mask = ~page_mask;
entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
}
(s->root_extended ? "(extended)" : ""));
}
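+/* Context-cache invalidation is lazy: bumping the global generation makes
+ * every per-device cached entry stale, because an entry is only trusted
+ * while its recorded generation matches the global one.  The full walk in
+ * vtd_reset_context_cache() is only needed once the counter is about to
+ * wrap.
+ */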
+static void vtd_context_global_invalidate(IntelIOMMUState *s)
+{
+ s->context_cache_gen++;
+ if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
+ vtd_reset_context_cache(s);
+ }
+}
+
+/* Find the VTD address space currently associated with a given bus number */
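+/* Note that vtd_as_by_bus_num[] is only a cache: the guest may renumber
+ * buses, so the authoritative mapping is keyed by the PCIBus pointer and a
+ * stale or missing bus-number slot is repaired here on demand.
+ */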
+static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
+{
+ VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
+ if (!vtd_bus) {
+ /* Iterate over the registered buses to find the one which currently
+ * holds this bus number, and update the bus_num lookup table.
+ */
+ GHashTableIter iter;
+
+ g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
+ if (pci_bus_num(vtd_bus->bus) == bus_num) {
+ s->vtd_as_by_bus_num[bus_num] = vtd_bus;
+ return vtd_bus;
+ }
+ }
+ }
+ return vtd_bus;
+}
+
+/* Do a context-cache device-selective invalidation.
+ * @func_mask: FM field after shifting
+ */
+static void vtd_context_device_invalidate(IntelIOMMUState *s,
+ uint16_t source_id,
+ uint16_t func_mask)
+{
+ uint16_t mask;
+ VTDBus *vtd_bus;
+ VTDAddressSpace *vtd_as;
+ uint16_t devfn;
+ uint16_t devfn_it;
+
+ switch (func_mask & 3) {
+ case 0:
+ mask = 0; /* No bits in the SID field masked */
+ break;
+ case 1:
+ mask = 4; /* Mask bit 2 in the SID field */
+ break;
+ case 2:
+ mask = 6; /* Mask bit 2:1 in the SID field */
+ break;
+ case 3:
+ mask = 7; /* Mask bit 2:0 in the SID field */
+ break;
+ }
+ /* The bits covered by @mask are "don't care" when comparing SIDs, so
+ * invert it for the equality check below.
+ */
+ mask = ~mask;
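+ /* Example: FM == 3 masks SID bits 2:0, so invalidating devfn 0x00
+ * also drops the cached entries for functions 1-7 of that device.
+ */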
+ VTD_DPRINTF(INV, "device-selective invalidation source 0x%"PRIx16
+ " mask %"PRIu16, source_id, mask);
+ vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
+ if (vtd_bus) {
+ devfn = VTD_SID_TO_DEVFN(source_id);
+ for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
+ vtd_as = vtd_bus->dev_as[devfn_it];
+ if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
+ VTD_DPRINTF(INV, "invalidate context-cahce of devfn 0x%"PRIx16,
+ devfn_it);
+ vtd_as->context_cache_entry.context_cache_gen = 0;
+ }
+ }
+ }
+}
+
/* Context-cache invalidation
* Returns the Context Actual Invalidation Granularity.
* @val: the content of the CCMD_REG
uint64_t type = val & VTD_CCMD_CIRG_MASK;
switch (type) {
+ case VTD_CCMD_DOMAIN_INVL:
+ VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
+ (uint16_t)VTD_CCMD_DID(val));
+ /* Fall through */
case VTD_CCMD_GLOBAL_INVL:
- VTD_DPRINTF(INV, "Global invalidation request");
+ VTD_DPRINTF(INV, "global invalidation");
caig = VTD_CCMD_GLOBAL_INVL_A;
- break;
-
- case VTD_CCMD_DOMAIN_INVL:
- VTD_DPRINTF(INV, "Domain-selective invalidation request");
- caig = VTD_CCMD_DOMAIN_INVL_A;
+ vtd_context_global_invalidate(s);
break;
case VTD_CCMD_DEVICE_INVL:
- VTD_DPRINTF(INV, "Domain-selective invalidation request");
caig = VTD_CCMD_DEVICE_INVL_A;
+ vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
break;
default:
- VTD_DPRINTF(GENERAL,
- "error: wrong context-cache invalidation granularity");
+ VTD_DPRINTF(GENERAL, "error: invalid granularity");
caig = 0;
}
return caig;
}
+static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
+{
+ vtd_reset_iotlb(s);
+}
+
+static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
+{
+ g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
+ &domain_id);
+}
+
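+/* @am is the address-mask value: the low @am bits of the page address are
+ * ignored, so a single invalidation covers 2^am contiguous 4KiB pages.
+ */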
+static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
+ hwaddr addr, uint8_t am)
+{
+ VTDIOTLBPageInvInfo info;
+
+ assert(am <= VTD_MAMV);
+ info.domain_id = domain_id;
+ info.addr = addr;
+ info.mask = ~((1 << am) - 1);
+ g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
+}
+
/* Flush IOTLB
* Returns the IOTLB Actual Invalidation Granularity.
* @val: the content of the IOTLB_REG
{
uint64_t iaig;
uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
+ uint16_t domain_id;
+ hwaddr addr;
+ uint8_t am;
switch (type) {
case VTD_TLB_GLOBAL_FLUSH:
- VTD_DPRINTF(INV, "Global IOTLB flush");
+ VTD_DPRINTF(INV, "global invalidation");
iaig = VTD_TLB_GLOBAL_FLUSH_A;
+ vtd_iotlb_global_invalidate(s);
break;
case VTD_TLB_DSI_FLUSH:
- VTD_DPRINTF(INV, "Domain-selective IOTLB flush");
+ domain_id = VTD_TLB_DID(val);
+ VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
+ domain_id);
iaig = VTD_TLB_DSI_FLUSH_A;
+ vtd_iotlb_domain_invalidate(s, domain_id);
break;
case VTD_TLB_PSI_FLUSH:
- VTD_DPRINTF(INV, "Page-selective-within-domain IOTLB flush");
+ domain_id = VTD_TLB_DID(val);
+ addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
+ am = VTD_IVA_AM(addr);
+ addr = VTD_IVA_ADDR(addr);
+ VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
+ " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
+ if (am > VTD_MAMV) {
+ VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
+ "%"PRIu8, (uint8_t)VTD_MAMV);
+ iaig = 0;
+ break;
+ }
iaig = VTD_TLB_PSI_FLUSH_A;
+ vtd_iotlb_page_invalidate(s, domain_id, addr, am);
break;
default:
- VTD_DPRINTF(GENERAL, "error: wrong iotlb flush granularity");
+ VTD_DPRINTF(GENERAL, "error: invalid granularity");
iaig = 0;
}
return iaig;
return true;
}
+static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
+ VTDInvDesc *inv_desc)
+{
+ if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
+ VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Context-cache "
+ "Invalidate Descriptor");
+ return false;
+ }
+ switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
+ case VTD_INV_DESC_CC_DOMAIN:
+ VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
+ (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
+ /* Fall through */
+ case VTD_INV_DESC_CC_GLOBAL:
+ VTD_DPRINTF(INV, "global invalidation");
+ vtd_context_global_invalidate(s);
+ break;
+
+ case VTD_INV_DESC_CC_DEVICE:
+ vtd_context_device_invalidate(s, VTD_INV_DESC_CC_SID(inv_desc->lo),
+ VTD_INV_DESC_CC_FM(inv_desc->lo));
+ break;
+
+ default:
+ VTD_DPRINTF(GENERAL, "error: invalid granularity in Context-cache "
+ "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
+ inv_desc->hi, inv_desc->lo);
+ return false;
+ }
+ return true;
+}
+
+static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
+{
+ uint16_t domain_id;
+ uint8_t am;
+ hwaddr addr;
+
+ if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
+ (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
+ VTD_DPRINTF(GENERAL, "error: non-zero reserved field in IOTLB "
+ "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
+ inv_desc->hi, inv_desc->lo);
+ return false;
+ }
+
+ switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
+ case VTD_INV_DESC_IOTLB_GLOBAL:
+ VTD_DPRINTF(INV, "global invalidation");
+ vtd_iotlb_global_invalidate(s);
+ break;
+
+ case VTD_INV_DESC_IOTLB_DOMAIN:
+ domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
+ VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
+ domain_id);
+ vtd_iotlb_domain_invalidate(s, domain_id);
+ break;
+
+ case VTD_INV_DESC_IOTLB_PAGE:
+ domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
+ addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
+ am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
+ VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
+ " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
+ if (am > VTD_MAMV) {
+ VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
+ "%"PRIu8, (uint8_t)VTD_MAMV);
+ return false;
+ }
+ vtd_iotlb_page_invalidate(s, domain_id, addr, am);
+ break;
+
+ default:
+ VTD_DPRINTF(GENERAL, "error: invalid granularity in IOTLB Invalidate "
+ "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
+ inv_desc->hi, inv_desc->lo);
+ return false;
+ }
+ return true;
+}
+
static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
VTDInvDesc inv_desc;
case VTD_INV_DESC_CC:
VTD_DPRINTF(INV, "Context-cache Invalidate Descriptor hi 0x%"PRIx64
" lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ if (!vtd_process_context_cache_desc(s, &inv_desc)) {
+ return false;
+ }
break;
case VTD_INV_DESC_IOTLB:
VTD_DPRINTF(INV, "IOTLB Invalidate Descriptor hi 0x%"PRIx64
" lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ if (!vtd_process_iotlb_desc(s, &inv_desc)) {
+ return false;
+ }
break;
case VTD_INV_DESC_WAIT:
vtd_handle_iotlb_write(s);
break;
+ /* Invalidate Address Register, 64-bit */
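+ /* The value written here is only latched; it is consumed by the
+ * page-selective flush when IOTLB_REG is subsequently written.
+ */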
+ case DMAR_IVA_REG:
+ VTD_DPRINTF(INV, "DMAR_IVA_REG write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64, addr, size, val);
+ if (size == 4) {
+ vtd_set_long(s, addr, val);
+ } else {
+ vtd_set_quad(s, addr, val);
+ }
+ break;
+
+ case DMAR_IVA_REG_HI:
+ VTD_DPRINTF(INV, "DMAR_IVA_REG_HI write addr 0x%"PRIx64
+ ", size %d, val 0x%"PRIx64, addr, size, val);
+ assert(size == 4);
+ vtd_set_long(s, addr, val);
+ break;
+
/* Fault Status Register, 32-bit */
case DMAR_FSTS_REG:
VTD_DPRINTF(FLOG, "DMAR_FSTS_REG write addr 0x%"PRIx64
{
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
IntelIOMMUState *s = vtd_as->iommu_state;
- uint8_t bus_num = vtd_as->bus_num;
- uint8_t devfn = vtd_as->devfn;
IOMMUTLBEntry ret = {
.target_as = &address_space_memory,
.iova = addr,
return ret;
}
- vtd_do_iommu_translate(s, bus_num, devfn, addr, is_write, &ret);
-
+ vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, addr,
+ is_write, &ret);
VTD_DPRINTF(MMU,
"bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
- " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, bus_num,
- VTD_PCI_SLOT(devfn), VTD_PCI_FUNC(devfn), devfn, addr,
- ret.translated_addr);
+ " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
+ VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
+ vtd_as->devfn, addr, ret.translated_addr);
return ret;
}
DEFINE_PROP_END_OF_LIST(),
};
+
+VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
+{
+ uint64_t key = (uintptr_t)bus; /* vtd_uint64_hash() expects 64-bit keys */
+ VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
+ VTDAddressSpace *vtd_dev_as;
+
+ if (!vtd_bus) {
+ uint64_t *new_key = g_malloc(sizeof(*new_key));
+
+ *new_key = (uintptr_t)bus;
+ /* No corresponding free() */
+ vtd_bus = g_malloc0(sizeof(VTDBus) +
+ sizeof(VTDAddressSpace *) * VTD_PCI_DEVFN_MAX);
+ vtd_bus->bus = bus;
+ /* Insert a heap-allocated key: the address of the stack-local "key"
+ * would dangle once this function returns.
+ */
+ g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
+ }
+
+ vtd_dev_as = vtd_bus->dev_as[devfn];
+
+ if (!vtd_dev_as) {
+ vtd_bus->dev_as[devfn] = vtd_dev_as =
+ g_malloc0(sizeof(VTDAddressSpace));
+
+ vtd_dev_as->bus = bus;
+ vtd_dev_as->devfn = (uint8_t)devfn;
+ vtd_dev_as->iommu_state = s;
+ vtd_dev_as->context_cache_entry.context_cache_gen = 0;
+ memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
+ &s->iommu_ops, "intel_iommu", UINT64_MAX);
+ address_space_init(&vtd_dev_as->as,
+ &vtd_dev_as->iommu, "intel_iommu");
+ }
+ return vtd_dev_as;
+}
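+/* Sketch of the intended use; assuming the PCI layer is pointed at the
+ * IOMMU via pci_setup_iommu(), its callback simply returns the per-device
+ * address space created here:
+ *
+ *     static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque,
+ *                                             int devfn)
+ *     {
+ *         IntelIOMMUState *s = opaque;
+ *
+ *         return &vtd_find_add_as(s, bus, devfn)->as;
+ *     }
+ */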
+
/* Do the initialization. It will also be called when reset, so pay
* attention when adding new initialization stuff.
*/
s->iq_last_desc_type = VTD_INV_DESC_NONE;
s->next_frcd_reg = 0;
s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
- VTD_CAP_SAGAW;
+ VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
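+ /* vtd_init() also runs on reset (see the comment above), so both
+ * caches must be emptied here: stale translations must not survive a
+ * guest reset.
+ */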
+ vtd_reset_context_cache(s);
+ vtd_reset_iotlb(s);
+
/* Define registers with default values and bit semantics */
vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
VTD_DPRINTF(GENERAL, "");
- memset(s->address_spaces, 0, sizeof(s->address_spaces));
+ memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
"intel_iommu", DMAR_REG_SIZE);
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
+ /* No corresponding destroy */
+ s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
+ g_free, g_free);
+ s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash,
+ vtd_uint64_equal,
+ g_free, g_free);
vtd_init(s);
}