1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright © 2006-2014 Intel Corporation.
13 #define pr_fmt(fmt) "DMAR: " fmt
14 #define dev_fmt(fmt) pr_fmt(fmt)
16 #include <linux/init.h>
17 #include <linux/bitmap.h>
18 #include <linux/debugfs.h>
19 #include <linux/export.h>
20 #include <linux/slab.h>
21 #include <linux/irq.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/pci.h>
25 #include <linux/dmar.h>
26 #include <linux/dma-map-ops.h>
27 #include <linux/mempool.h>
28 #include <linux/memory.h>
29 #include <linux/cpu.h>
30 #include <linux/timer.h>
32 #include <linux/iova.h>
33 #include <linux/iommu.h>
34 #include <linux/dma-iommu.h>
35 #include <linux/intel-iommu.h>
36 #include <linux/syscore_ops.h>
37 #include <linux/tboot.h>
38 #include <linux/dmi.h>
39 #include <linux/pci-ats.h>
40 #include <linux/memblock.h>
41 #include <linux/dma-direct.h>
42 #include <linux/crash_dump.h>
43 #include <linux/numa.h>
44 #include <asm/irq_remapping.h>
45 #include <asm/cacheflush.h>
46 #include <asm/iommu.h>
48 #include "../irq_remapping.h"
50 #include "cap_audit.h"
52 #define ROOT_SIZE VTD_PAGE_SIZE
53 #define CONTEXT_SIZE VTD_PAGE_SIZE
55 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
56 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
57 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
58 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
60 #define IOAPIC_RANGE_START (0xfee00000)
61 #define IOAPIC_RANGE_END (0xfeefffff)
62 #define IOVA_START_ADDR (0x1000)
64 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
66 #define MAX_AGAW_WIDTH 64
67 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
69 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
70 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
72 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
73 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
74 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
75 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
76 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
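/*
 * Illustrative numbers for the macros above, assuming gaw == 48 and
 * VTD_PAGE_SHIFT == 12:
 *
 *   __DOMAIN_MAX_PFN(48)  == (1ULL << 36) - 1 == 0xFFFFFFFFF
 *   __DOMAIN_MAX_ADDR(48) == (1ULL << 48) - 1
 *   DOMAIN_MAX_ADDR(48)   == 0xFFFFFFFFF << 12 == 0xFFFFFFFFF000
 *
 * i.e. DOMAIN_MAX_ADDR() yields the base address of the last 4KiB page
 * rather than the last byte, and DOMAIN_MAX_PFN() only clamps on 32-bit
 * kernels, where the PFN would not otherwise fit in an unsigned long.
 */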
78 /* IO virtual address start page frame number */
79 #define IOVA_START_PFN (1)
81 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
83 /* page table handling */
84 #define LEVEL_STRIDE (9)
85 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
88 * This bitmap is used to advertise the page sizes our hardware supports
89 * to the IOMMU core, which will then use this information to split
90 * physically contiguous memory regions it is mapping into page sizes
93 * Traditionally the IOMMU core just handed us the mappings directly,
94 * after making sure the size is a power-of-two multiple of 4KiB and that the
95 * mapping has natural alignment.
97 * To retain this behavior, we currently advertise that we support
98 * all page sizes that are a power-of-two multiple of 4KiB.
100 * If at some point we'd like to utilize the IOMMU core's new behavior,
101 * we could change this to advertise the real page sizes we support.
103 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
105 static inline int agaw_to_level(int agaw)
110 static inline int agaw_to_width(int agaw)
112 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
115 static inline int width_to_agaw(int width)
117 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
120 static inline unsigned int level_to_offset_bits(int level)
122 return (level - 1) * LEVEL_STRIDE;
125 static inline int pfn_level_offset(u64 pfn, int level)
127 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
130 static inline u64 level_mask(int level)
132 return -1ULL << level_to_offset_bits(level);
135 static inline u64 level_size(int level)
137 return 1ULL << level_to_offset_bits(level);
140 static inline u64 align_to_level(u64 pfn, int level)
142 return (pfn + level_size(level) - 1) & level_mask(level);
145 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
147 return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
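/*
 * Worked example for the helpers above (LEVEL_STRIDE == 9, so each
 * level indexes 512 entries of a 4KiB page table):
 *
 *   level_to_offset_bits(2)  == 9
 *   pfn_level_offset(pfn, 2) == (pfn >> 9) & 0x1ff
 *   level_size(2)            == 512 pages (2MiB), level_size(3) == 1GiB
 *   align_to_level(1000, 2)  == 1024 (round up to a 512-page boundary)
 *   lvl_to_nr_pages(2)       == 512
 */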
150 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
151 are never going to work. */
152 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
154 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
157 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
159 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
161 static inline unsigned long page_to_dma_pfn(struct page *pg)
163 return mm_to_dma_pfn(page_to_pfn(pg));
165 static inline unsigned long virt_to_dma_pfn(void *p)
167 return page_to_dma_pfn(virt_to_page(p));
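/*
 * On x86 both PAGE_SHIFT and VTD_PAGE_SHIFT are 12, so the conversions
 * above are identity operations. They would only shift if MM pages were
 * larger than 4KiB (e.g. a hypothetical 64KiB PAGE_SIZE would make
 * mm_to_dma_pfn() multiply by 16), which is why VT-d pages must never
 * be larger than MM pages.
 */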
170 /* global iommu list, set NULL for ignored DMAR units */
171 static struct intel_iommu **g_iommus;
173 static void __init check_tylersburg_isoch(void);
174 static int rwbf_quirk;
177 * set to 1 to panic the kernel if VT-d cannot be successfully enabled
178 * (used when kernel is launched w/ TXT)
180 static int force_on = 0;
181 static int intel_iommu_tboot_noforce;
182 static int no_platform_optin;
184 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
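/*
 * struct root_entry is two u64s (see root_entry_lctp()/root_entry_uctp()
 * below), so with a 4KiB root table ROOT_ENTRY_NR == 4096 / 16 == 256:
 * one root entry per PCI bus number on the segment.
 */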
187 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
190 static phys_addr_t root_entry_lctp(struct root_entry *re)
195 return re->lo & VTD_PAGE_MASK;
199 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
202 static phys_addr_t root_entry_uctp(struct root_entry *re)
207 return re->hi & VTD_PAGE_MASK;
210 static inline void context_clear_pasid_enable(struct context_entry *context)
212 context->lo &= ~(1ULL << 11);
215 static inline bool context_pasid_enabled(struct context_entry *context)
217 return !!(context->lo & (1ULL << 11));
220 static inline void context_set_copied(struct context_entry *context)
222 context->hi |= (1ull << 3);
225 static inline bool context_copied(struct context_entry *context)
227 return !!(context->hi & (1ULL << 3));
230 static inline bool __context_present(struct context_entry *context)
232 return (context->lo & 1);
235 bool context_present(struct context_entry *context)
237 return context_pasid_enabled(context) ?
238 __context_present(context) :
239 __context_present(context) && !context_copied(context);
242 static inline void context_set_present(struct context_entry *context)
247 static inline void context_set_fault_enable(struct context_entry *context)
249 context->lo &= (((u64)-1) << 2) | 1;
252 static inline void context_set_translation_type(struct context_entry *context,
255 context->lo &= (((u64)-1) << 4) | 3;
256 context->lo |= (value & 3) << 2;
259 static inline void context_set_address_root(struct context_entry *context,
262 context->lo &= ~VTD_PAGE_MASK;
263 context->lo |= value & VTD_PAGE_MASK;
266 static inline void context_set_address_width(struct context_entry *context,
269 context->hi |= value & 7;
272 static inline void context_set_domain_id(struct context_entry *context,
275 context->hi |= (value & ((1 << 16) - 1)) << 8;
278 static inline int context_domain_id(struct context_entry *c)
280 return((c->hi >> 8) & 0xffff);
283 static inline void context_clear_entry(struct context_entry *context)
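/*
 * Rough sketch of how the legacy-mode helpers above are combined
 * (mirrors domain_context_mapping_one() further down; 'did', 'pgd' and
 * 'agaw' come from the dmar_domain being attached):
 *
 *   context_clear_entry(context);
 *   context_set_domain_id(context, did);
 *   context_set_address_root(context, virt_to_phys(pgd));
 *   context_set_address_width(context, agaw);
 *   context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
 *   context_set_fault_enable(context);
 *   context_set_present(context);
 */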
290 * This domain is a statically identity mapping domain.
291 * 1. This domain creates a static 1:1 mapping of all usable memory.
292 * 2. It maps to each iommu if successful.
293 * 3. Each iommu maps to this domain if successful.
295 static struct dmar_domain *si_domain;
296 static int hw_pass_through = 1;
298 #define for_each_domain_iommu(idx, domain) \
299 for (idx = 0; idx < g_num_of_iommus; idx++) \
300 if (domain->iommu_refcnt[idx])
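/*
 * Typical use, as in domain_update_iommu_coherency() below; 'idx'
 * walks the sequence ids of the iommus that hold a reference on the
 * domain:
 *
 *   for_each_domain_iommu(i, domain)
 *           if (!iommu_paging_structure_coherency(g_iommus[i]))
 *                   domain->iommu_coherency = 0;
 */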
302 struct dmar_rmrr_unit {
303 struct list_head list; /* list of rmrr units */
304 struct acpi_dmar_header *hdr; /* ACPI header */
305 u64 base_address; /* reserved base address*/
306 u64 end_address; /* reserved end address */
307 struct dmar_dev_scope *devices; /* target devices */
308 int devices_cnt; /* target device count */
311 struct dmar_atsr_unit {
312 struct list_head list; /* list of ATSR units */
313 struct acpi_dmar_header *hdr; /* ACPI header */
314 struct dmar_dev_scope *devices; /* target devices */
315 int devices_cnt; /* target device count */
316 u8 include_all:1; /* include all ports */
319 struct dmar_satc_unit {
320 struct list_head list; /* list of SATC units */
321 struct acpi_dmar_header *hdr; /* ACPI header */
322 struct dmar_dev_scope *devices; /* target devices */
323 struct intel_iommu *iommu; /* the corresponding iommu */
324 int devices_cnt; /* target device count */
325 u8 atc_required:1; /* ATS is required */
328 static LIST_HEAD(dmar_atsr_units);
329 static LIST_HEAD(dmar_rmrr_units);
330 static LIST_HEAD(dmar_satc_units);
332 #define for_each_rmrr_units(rmrr) \
333 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
335 /* bitmap for indexing intel_iommus */
336 static int g_num_of_iommus;
338 static void domain_exit(struct dmar_domain *domain);
339 static void domain_remove_dev_info(struct dmar_domain *domain);
340 static void dmar_remove_one_dev_info(struct device *dev);
341 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
342 static int intel_iommu_attach_device(struct iommu_domain *domain,
344 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
347 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
348 int dmar_disabled = 0;
350 int dmar_disabled = 1;
351 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
353 #ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
354 int intel_iommu_sm = 1;
357 #endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
359 int intel_iommu_enabled = 0;
360 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
362 static int dmar_map_gfx = 1;
363 static int intel_iommu_strict;
364 static int intel_iommu_superpage = 1;
365 static int iommu_identity_mapping;
366 static int iommu_skip_te_disable;
368 #define IDENTMAP_GFX 2
369 #define IDENTMAP_AZALIA 4
371 int intel_iommu_gfx_mapped;
372 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
374 #define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
375 struct device_domain_info *get_domain_info(struct device *dev)
377 struct device_domain_info *info;
382 info = dev_iommu_priv_get(dev);
383 if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
389 DEFINE_SPINLOCK(device_domain_lock);
390 static LIST_HEAD(device_domain_list);
393 * Iterate over elements in device_domain_list and call the specified
394 * callback @fn against each element.
396 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
397 void *data), void *data)
401 struct device_domain_info *info;
403 spin_lock_irqsave(&device_domain_lock, flags);
404 list_for_each_entry(info, &device_domain_list, global) {
405 ret = fn(info, data);
407 spin_unlock_irqrestore(&device_domain_lock, flags);
411 spin_unlock_irqrestore(&device_domain_lock, flags);
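/*
 * Illustrative callback (not part of the driver): count the registered
 * device_domain_info entries. A non-zero return from the callback stops
 * the walk early, as the unlock-and-return path above shows.
 *
 *   static int count_devinfo(struct device_domain_info *info, void *data)
 *   {
 *           (*(int *)data)++;
 *           return 0;
 *   }
 *
 *   int n = 0;
 *   for_each_device_domain(count_devinfo, &n);
 */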
416 const struct iommu_ops intel_iommu_ops;
418 static bool translation_pre_enabled(struct intel_iommu *iommu)
420 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
423 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
425 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
428 static void init_translation_status(struct intel_iommu *iommu)
432 gsts = readl(iommu->reg + DMAR_GSTS_REG);
433 if (gsts & DMA_GSTS_TES)
434 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
437 static int __init intel_iommu_setup(char *str)
442 if (!strncmp(str, "on", 2)) {
444 pr_info("IOMMU enabled\n");
445 } else if (!strncmp(str, "off", 3)) {
447 no_platform_optin = 1;
448 pr_info("IOMMU disabled\n");
449 } else if (!strncmp(str, "igfx_off", 8)) {
451 pr_info("Disable GFX device mapping\n");
452 } else if (!strncmp(str, "forcedac", 8)) {
453 pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
454 iommu_dma_forcedac = true;
455 } else if (!strncmp(str, "strict", 6)) {
456 pr_info("Disable batched IOTLB flush\n");
457 intel_iommu_strict = 1;
458 } else if (!strncmp(str, "sp_off", 6)) {
459 pr_info("Disable supported super page\n");
460 intel_iommu_superpage = 0;
461 } else if (!strncmp(str, "sm_on", 5)) {
462 pr_info("Intel-IOMMU: scalable mode supported\n");
464 } else if (!strncmp(str, "tboot_noforce", 13)) {
465 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
466 intel_iommu_tboot_noforce = 1;
469 str += strcspn(str, ",");
475 __setup("intel_iommu=", intel_iommu_setup);
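/*
 * Example (hypothetical) command line: booting with
 *
 *   intel_iommu=on,sm_on,tboot_noforce
 *
 * enables the IOMMU, opts in to scalable mode and skips forcing the
 * IOMMU on after a tboot launch; options are comma separated, as parsed
 * by the strcspn() loop above.
 */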
477 static struct kmem_cache *iommu_domain_cache;
478 static struct kmem_cache *iommu_devinfo_cache;
480 static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu, u16 did)
482 struct dmar_domain **domains;
485 domains = iommu->domains[idx];
489 return domains[did & 0xff];
492 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
493 struct dmar_domain *domain)
495 struct dmar_domain **domains;
498 if (!iommu->domains[idx]) {
499 size_t size = 256 * sizeof(struct dmar_domain *);
500 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
503 domains = iommu->domains[idx];
504 if (WARN_ON(!domains))
507 domains[did & 0xff] = domain;
510 void *alloc_pgtable_page(int node)
515 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
517 vaddr = page_address(page);
521 void free_pgtable_page(void *vaddr)
523 free_page((unsigned long)vaddr);
526 static inline void *alloc_domain_mem(void)
528 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
531 static void free_domain_mem(void *vaddr)
533 kmem_cache_free(iommu_domain_cache, vaddr);
536 static inline void *alloc_devinfo_mem(void)
538 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
541 static inline void free_devinfo_mem(void *vaddr)
543 kmem_cache_free(iommu_devinfo_cache, vaddr);
546 static inline int domain_type_is_si(struct dmar_domain *domain)
548 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
551 static inline bool domain_use_first_level(struct dmar_domain *domain)
553 return domain->flags & DOMAIN_FLAG_USE_FIRST_LEVEL;
556 static inline int domain_pfn_supported(struct dmar_domain *domain,
559 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
561 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
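/*
 * Example: a domain with agaw == 2 has agaw_to_width(2) == 48, so
 * addr_width == 36 and any pfn below (1UL << 36), i.e. any IOVA below
 * 2^48, passes the check above.
 */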
564 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
569 sagaw = cap_sagaw(iommu->cap);
570 for (agaw = width_to_agaw(max_gaw);
572 if (test_bit(agaw, &sagaw))
580 * Calculate max SAGAW for each iommu.
582 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
584 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
588 * calculate agaw for each iommu.
589 * "SAGAW" may be different across iommus, use a default agaw, and
590 * get a supported less agaw for iommus that don't support the default agaw.
592 int iommu_calculate_agaw(struct intel_iommu *iommu)
594 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
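/*
 * Worked example: DEFAULT_DOMAIN_ADDRESS_WIDTH is 57, so the search in
 * __iommu_calculate_agaw() starts at width_to_agaw(57) == 3 (5-level).
 * If the unit's SAGAW capability only advertises 4-level tables (bit 2
 * set), the loop settles on agaw == 2, i.e. a 48-bit address width
 * (agaw_to_width(2) == 30 + 2 * 9) for the domain.
 */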
597 /* This function only returns a single iommu in a domain */
598 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
602 /* si_domain and vm domain should not get here. */
603 if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
606 for_each_domain_iommu(iommu_id, domain)
609 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
612 return g_iommus[iommu_id];
615 static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
617 return sm_supported(iommu) ?
618 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
621 static void domain_update_iommu_coherency(struct dmar_domain *domain)
623 struct dmar_drhd_unit *drhd;
624 struct intel_iommu *iommu;
628 domain->iommu_coherency = 1;
630 for_each_domain_iommu(i, domain) {
632 if (!iommu_paging_structure_coherency(g_iommus[i])) {
633 domain->iommu_coherency = 0;
640 /* No hardware attached; use lowest common denominator */
642 for_each_active_iommu(iommu, drhd) {
643 if (!iommu_paging_structure_coherency(iommu)) {
644 domain->iommu_coherency = 0;
651 static int domain_update_iommu_snooping(struct intel_iommu *skip)
653 struct dmar_drhd_unit *drhd;
654 struct intel_iommu *iommu;
658 for_each_active_iommu(iommu, drhd) {
661 * If the hardware is operating in the scalable mode,
662 * the snooping control is always supported since we
663 * always set PASID-table-entry.PGSNP bit if the domain
664 * is managed outside (UNMANAGED).
666 if (!sm_supported(iommu) &&
667 !ecap_sc_support(iommu->ecap)) {
678 static int domain_update_iommu_superpage(struct dmar_domain *domain,
679 struct intel_iommu *skip)
681 struct dmar_drhd_unit *drhd;
682 struct intel_iommu *iommu;
685 if (!intel_iommu_superpage) {
689 /* set iommu_superpage to the smallest common denominator */
691 for_each_active_iommu(iommu, drhd) {
693 if (domain && domain_use_first_level(domain)) {
694 if (!cap_fl1gp_support(iommu->cap))
697 mask &= cap_super_page_val(iommu->cap);
709 static int domain_update_device_node(struct dmar_domain *domain)
711 struct device_domain_info *info;
712 int nid = NUMA_NO_NODE;
714 assert_spin_locked(&device_domain_lock);
716 if (list_empty(&domain->devices))
719 list_for_each_entry(info, &domain->devices, link) {
724 * There could possibly be multiple device numa nodes as devices
725 * within the same domain may sit behind different IOMMUs. There
726 * is no perfect answer in such a situation, so we use a first-
727 * come, first-served policy.
729 nid = dev_to_node(info->dev);
730 if (nid != NUMA_NO_NODE)
737 static void domain_update_iotlb(struct dmar_domain *domain);
739 /* Some capabilities may be different across iommus */
740 static void domain_update_iommu_cap(struct dmar_domain *domain)
742 domain_update_iommu_coherency(domain);
743 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
744 domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
747 * If RHSA is missing, we should default to the device numa domain
750 if (domain->nid == NUMA_NO_NODE)
751 domain->nid = domain_update_device_node(domain);
754 * First-level translation restricts the input-address to a
755 * canonical address (i.e., address bits 63:N have the same
756 * value as address bit [N-1], where N is 48-bits with 4-level
757 * paging and 57-bits with 5-level paging). Hence, skip bit
760 if (domain_use_first_level(domain))
761 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
763 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
765 domain_update_iotlb(domain);
768 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
771 struct root_entry *root = &iommu->root_entry[bus];
772 struct context_entry *context;
776 if (sm_supported(iommu)) {
784 context = phys_to_virt(*entry & VTD_PAGE_MASK);
786 unsigned long phy_addr;
790 context = alloc_pgtable_page(iommu->node);
794 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
795 phy_addr = virt_to_phys((void *)context);
796 *entry = phy_addr | 1;
797 __iommu_flush_cache(iommu, entry, sizeof(*entry));
799 return &context[devfn];
802 static bool attach_deferred(struct device *dev)
804 return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
808 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
809 * sub-hierarchy of a candidate PCI-PCI bridge
810 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
811 * @bridge: the candidate PCI-PCI bridge
813 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
816 is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
818 struct pci_dev *pdev, *pbridge;
820 if (!dev_is_pci(dev) || !dev_is_pci(bridge))
823 pdev = to_pci_dev(dev);
824 pbridge = to_pci_dev(bridge);
826 if (pbridge->subordinate &&
827 pbridge->subordinate->number <= pdev->bus->number &&
828 pbridge->subordinate->busn_res.end >= pdev->bus->number)
834 static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
836 struct dmar_drhd_unit *drhd;
840 /* We know that this device on this chipset has its own IOMMU.
841 * If we find it under a different IOMMU, then the BIOS is lying
842 * to us. Hope that the IOMMU for this device is actually
843 * disabled, and it needs no translation...
845 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
848 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
853 /* we know that this iommu should be at offset 0xa000 from vtbar */
854 drhd = dmar_find_matched_drhd_unit(pdev);
855 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
856 pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
857 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
864 static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
866 if (!iommu || iommu->drhd->ignored)
869 if (dev_is_pci(dev)) {
870 struct pci_dev *pdev = to_pci_dev(dev);
872 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
873 pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
874 quirk_ioat_snb_local_iommu(pdev))
881 struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
883 struct dmar_drhd_unit *drhd = NULL;
884 struct pci_dev *pdev = NULL;
885 struct intel_iommu *iommu;
893 if (dev_is_pci(dev)) {
894 struct pci_dev *pf_pdev;
896 pdev = pci_real_dma_dev(to_pci_dev(dev));
898 /* VFs aren't listed in scope tables; we need to look up
899 * the PF instead to find the IOMMU. */
900 pf_pdev = pci_physfn(pdev);
902 segment = pci_domain_nr(pdev->bus);
903 } else if (has_acpi_companion(dev))
904 dev = &ACPI_COMPANION(dev)->dev;
907 for_each_iommu(iommu, drhd) {
908 if (pdev && segment != drhd->segment)
911 for_each_active_dev_scope(drhd->devices,
912 drhd->devices_cnt, i, tmp) {
914 /* For a VF use its original BDF# not that of the PF
915 * which we used for the IOMMU lookup. Strictly speaking
916 * we could do this for all PCI devices; we only need to
917 * get the BDF# from the scope table for ACPI matches. */
918 if (pdev && pdev->is_virtfn)
922 *bus = drhd->devices[i].bus;
923 *devfn = drhd->devices[i].devfn;
928 if (is_downstream_to_pci_bridge(dev, tmp))
932 if (pdev && drhd->include_all) {
935 *bus = pdev->bus->number;
936 *devfn = pdev->devfn;
943 if (iommu_is_dummy(iommu, dev))
951 static void domain_flush_cache(struct dmar_domain *domain,
952 void *addr, int size)
954 if (!domain->iommu_coherency)
955 clflush_cache_range(addr, size);
958 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
960 struct context_entry *context;
964 spin_lock_irqsave(&iommu->lock, flags);
965 context = iommu_context_addr(iommu, bus, devfn, 0);
967 ret = context_present(context);
968 spin_unlock_irqrestore(&iommu->lock, flags);
972 static void free_context_table(struct intel_iommu *iommu)
976 struct context_entry *context;
978 spin_lock_irqsave(&iommu->lock, flags);
979 if (!iommu->root_entry) {
982 for (i = 0; i < ROOT_ENTRY_NR; i++) {
983 context = iommu_context_addr(iommu, i, 0, 0);
985 free_pgtable_page(context);
987 if (!sm_supported(iommu))
990 context = iommu_context_addr(iommu, i, 0x80, 0);
992 free_pgtable_page(context);
995 free_pgtable_page(iommu->root_entry);
996 iommu->root_entry = NULL;
998 spin_unlock_irqrestore(&iommu->lock, flags);
1001 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
1002 unsigned long pfn, int *target_level)
1004 struct dma_pte *parent, *pte;
1005 int level = agaw_to_level(domain->agaw);
1008 BUG_ON(!domain->pgd);
1010 if (!domain_pfn_supported(domain, pfn))
1011 /* Address beyond IOMMU's addressing capabilities. */
1014 parent = domain->pgd;
1019 offset = pfn_level_offset(pfn, level);
1020 pte = &parent[offset];
1021 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1023 if (level == *target_level)
1026 if (!dma_pte_present(pte)) {
1029 tmp_page = alloc_pgtable_page(domain->nid);
1034 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1035 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1036 if (domain_use_first_level(domain)) {
1037 pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
1038 if (domain->domain.type == IOMMU_DOMAIN_DMA)
1039 pteval |= DMA_FL_PTE_ACCESS;
1041 if (cmpxchg64(&pte->val, 0ULL, pteval))
1042 /* Someone else set it while we were thinking; use theirs. */
1043 free_pgtable_page(tmp_page);
1045 domain_flush_cache(domain, pte, sizeof(*pte));
1050 parent = phys_to_virt(dma_pte_addr(pte));
1055 *target_level = level;
1060 /* return address's pte at specific level */
1061 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1063 int level, int *large_page)
1065 struct dma_pte *parent, *pte;
1066 int total = agaw_to_level(domain->agaw);
1069 parent = domain->pgd;
1070 while (level <= total) {
1071 offset = pfn_level_offset(pfn, total);
1072 pte = &parent[offset];
1076 if (!dma_pte_present(pte)) {
1077 *large_page = total;
1081 if (dma_pte_superpage(pte)) {
1082 *large_page = total;
1086 parent = phys_to_virt(dma_pte_addr(pte));
1092 /* clear last level pte; a TLB flush should follow */
1093 static void dma_pte_clear_range(struct dmar_domain *domain,
1094 unsigned long start_pfn,
1095 unsigned long last_pfn)
1097 unsigned int large_page;
1098 struct dma_pte *first_pte, *pte;
1100 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1101 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1102 BUG_ON(start_pfn > last_pfn);
1104 /* we don't need lock here; nobody else touches the iova range */
1107 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1109 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1114 start_pfn += lvl_to_nr_pages(large_page);
1116 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1118 domain_flush_cache(domain, first_pte,
1119 (void *)pte - (void *)first_pte);
1121 } while (start_pfn && start_pfn <= last_pfn);
1124 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1125 int retain_level, struct dma_pte *pte,
1126 unsigned long pfn, unsigned long start_pfn,
1127 unsigned long last_pfn)
1129 pfn = max(start_pfn, pfn);
1130 pte = &pte[pfn_level_offset(pfn, level)];
1133 unsigned long level_pfn;
1134 struct dma_pte *level_pte;
1136 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1139 level_pfn = pfn & level_mask(level);
1140 level_pte = phys_to_virt(dma_pte_addr(pte));
1143 dma_pte_free_level(domain, level - 1, retain_level,
1144 level_pte, level_pfn, start_pfn,
1149 * Free the page table if we're below the level we want to
1150 * retain and the range covers the entire table.
1152 if (level < retain_level && !(start_pfn > level_pfn ||
1153 last_pfn < level_pfn + level_size(level) - 1)) {
1155 domain_flush_cache(domain, pte, sizeof(*pte));
1156 free_pgtable_page(level_pte);
1159 pfn += level_size(level);
1160 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1164 * clear last level (leaf) ptes and free page table pages below the
1165 * level we wish to keep intact.
1167 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1168 unsigned long start_pfn,
1169 unsigned long last_pfn,
1172 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1173 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1174 BUG_ON(start_pfn > last_pfn);
1176 dma_pte_clear_range(domain, start_pfn, last_pfn);
1178 /* We don't need lock here; nobody else touches the iova range */
1179 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
1180 domain->pgd, 0, start_pfn, last_pfn);
1183 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1184 free_pgtable_page(domain->pgd);
1189 /* When a page at a given level is being unlinked from its parent, we don't
1190 need to *modify* it at all. All we need to do is make a list of all the
1191 pages which can be freed just as soon as we've flushed the IOTLB and we
1192 know the hardware page-walk will no longer touch them.
1193 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1195 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1196 int level, struct dma_pte *pte,
1197 struct page *freelist)
1201 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1202 pg->freelist = freelist;
1208 pte = page_address(pg);
1210 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1211 freelist = dma_pte_list_pagetables(domain, level - 1,
1214 } while (!first_pte_in_page(pte));
1219 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1220 struct dma_pte *pte, unsigned long pfn,
1221 unsigned long start_pfn,
1222 unsigned long last_pfn,
1223 struct page *freelist)
1225 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1227 pfn = max(start_pfn, pfn);
1228 pte = &pte[pfn_level_offset(pfn, level)];
1231 unsigned long level_pfn;
1233 if (!dma_pte_present(pte))
1236 level_pfn = pfn & level_mask(level);
1238 /* If range covers entire pagetable, free it */
1239 if (start_pfn <= level_pfn &&
1240 last_pfn >= level_pfn + level_size(level) - 1) {
1241 /* These subordinate page tables are going away entirely. Don't
1242 bother to clear them; we're just going to *free* them. */
1243 if (level > 1 && !dma_pte_superpage(pte))
1244 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1250 } else if (level > 1) {
1251 /* Recurse down into a level that isn't *entirely* obsolete */
1252 freelist = dma_pte_clear_level(domain, level - 1,
1253 phys_to_virt(dma_pte_addr(pte)),
1254 level_pfn, start_pfn, last_pfn,
1258 pfn += level_size(level);
1259 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1262 domain_flush_cache(domain, first_pte,
1263 (void *)++last_pte - (void *)first_pte);
1268 /* We can't just free the pages because the IOMMU may still be walking
1269 the page tables, and may have cached the intermediate levels. The
1270 pages can only be freed after the IOTLB flush has been done. */
1271 static struct page *domain_unmap(struct dmar_domain *domain,
1272 unsigned long start_pfn,
1273 unsigned long last_pfn,
1274 struct page *freelist)
1276 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1277 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1278 BUG_ON(start_pfn > last_pfn);
1280 /* we don't need lock here; nobody else touches the iova range */
1281 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1282 domain->pgd, 0, start_pfn, last_pfn,
1286 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1287 struct page *pgd_page = virt_to_page(domain->pgd);
1288 pgd_page->freelist = freelist;
1289 freelist = pgd_page;
1297 static void dma_free_pagelist(struct page *freelist)
1301 while ((pg = freelist)) {
1302 freelist = pg->freelist;
1303 free_pgtable_page(page_address(pg));
1307 /* iommu handling */
1308 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1310 struct root_entry *root;
1311 unsigned long flags;
1313 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1315 pr_err("Allocating root entry for %s failed\n",
1320 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1322 spin_lock_irqsave(&iommu->lock, flags);
1323 iommu->root_entry = root;
1324 spin_unlock_irqrestore(&iommu->lock, flags);
1329 static void iommu_set_root_entry(struct intel_iommu *iommu)
1335 addr = virt_to_phys(iommu->root_entry);
1336 if (sm_supported(iommu))
1337 addr |= DMA_RTADDR_SMT;
1339 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1340 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1342 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1344 /* Make sure hardware completes it */
1345 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1346 readl, (sts & DMA_GSTS_RTPS), sts);
1348 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1350 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
1351 if (sm_supported(iommu))
1352 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
1353 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1356 void iommu_flush_write_buffer(struct intel_iommu *iommu)
1361 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1364 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1365 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1367 /* Make sure hardware completes it */
1368 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1369 readl, (!(val & DMA_GSTS_WBFS)), val);
1371 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1374 /* return value determines if we need a write buffer flush */
1375 static void __iommu_flush_context(struct intel_iommu *iommu,
1376 u16 did, u16 source_id, u8 function_mask,
1383 case DMA_CCMD_GLOBAL_INVL:
1384 val = DMA_CCMD_GLOBAL_INVL;
1386 case DMA_CCMD_DOMAIN_INVL:
1387 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1389 case DMA_CCMD_DEVICE_INVL:
1390 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1391 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1396 val |= DMA_CCMD_ICC;
1398 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1399 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1401 /* Make sure hardware completes it */
1402 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1403 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1405 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1408 /* return value determines if we need a write buffer flush */
1409 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1410 u64 addr, unsigned int size_order, u64 type)
1412 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1413 u64 val = 0, val_iva = 0;
1417 case DMA_TLB_GLOBAL_FLUSH:
1418 /* global flush doesn't need to set IVA_REG */
1419 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1421 case DMA_TLB_DSI_FLUSH:
1422 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1424 case DMA_TLB_PSI_FLUSH:
1425 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1426 /* IH bit is passed in as part of address */
1427 val_iva = size_order | addr;
1432 /* Note: set drain read/write */
1435 * This is probably meant to be super secure. Looks like we can
1436 * ignore it without any impact.
1438 if (cap_read_drain(iommu->cap))
1439 val |= DMA_TLB_READ_DRAIN;
1441 if (cap_write_drain(iommu->cap))
1442 val |= DMA_TLB_WRITE_DRAIN;
1444 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1445 /* Note: Only uses first TLB reg currently */
1447 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1448 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1450 /* Make sure hardware completes it */
1451 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1452 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1454 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1456 /* check IOTLB invalidation granularity */
1457 if (DMA_TLB_IAIG(val) == 0)
1458 pr_err("Flush IOTLB failed\n");
1459 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1460 pr_debug("TLB flush request %Lx, actual %Lx\n",
1461 (unsigned long long)DMA_TLB_IIRG(type),
1462 (unsigned long long)DMA_TLB_IAIG(val));
1465 static struct device_domain_info *
1466 iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
1469 struct device_domain_info *info;
1471 assert_spin_locked(&device_domain_lock);
1476 list_for_each_entry(info, &domain->devices, link)
1477 if (info->iommu == iommu && info->bus == bus &&
1478 info->devfn == devfn) {
1479 if (info->ats_supported && info->dev)
1487 static void domain_update_iotlb(struct dmar_domain *domain)
1489 struct device_domain_info *info;
1490 bool has_iotlb_device = false;
1492 assert_spin_locked(&device_domain_lock);
1494 list_for_each_entry(info, &domain->devices, link)
1495 if (info->ats_enabled) {
1496 has_iotlb_device = true;
1500 if (!has_iotlb_device) {
1501 struct subdev_domain_info *sinfo;
1503 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
1504 info = get_domain_info(sinfo->pdev);
1505 if (info && info->ats_enabled) {
1506 has_iotlb_device = true;
1512 domain->has_iotlb_device = has_iotlb_device;
1515 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1517 struct pci_dev *pdev;
1519 assert_spin_locked(&device_domain_lock);
1521 if (!info || !dev_is_pci(info->dev))
1524 pdev = to_pci_dev(info->dev);
1525 /* For IOMMUs that support device IOTLB throttling (DIT), we assign
1526 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
1527 * queue depth at PF level. If DIT is not set, PFSID will be treated as
1528 * reserved, which should be set to 0.
1530 if (!ecap_dit(info->iommu->ecap))
1533 struct pci_dev *pf_pdev;
1535 /* pdev will be returned if the device is not a VF */
1536 pf_pdev = pci_physfn(pdev);
1537 info->pfsid = pci_dev_id(pf_pdev);
1540 #ifdef CONFIG_INTEL_IOMMU_SVM
1541 /* The PCIe spec, in its wisdom, declares that the behaviour of
1542 the device is undefined if you enable PASID support after ATS
1543 support. So always enable PASID support on devices which
1544 have it, even if we can't yet know if we're ever going to
1546 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1547 info->pasid_enabled = 1;
1549 if (info->pri_supported &&
1550 (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
1551 !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1552 info->pri_enabled = 1;
1554 if (info->ats_supported && pci_ats_page_aligned(pdev) &&
1555 !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1556 info->ats_enabled = 1;
1557 domain_update_iotlb(info->domain);
1558 info->ats_qdep = pci_ats_queue_depth(pdev);
1562 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1564 struct pci_dev *pdev;
1566 assert_spin_locked(&device_domain_lock);
1568 if (!dev_is_pci(info->dev))
1571 pdev = to_pci_dev(info->dev);
1573 if (info->ats_enabled) {
1574 pci_disable_ats(pdev);
1575 info->ats_enabled = 0;
1576 domain_update_iotlb(info->domain);
1578 #ifdef CONFIG_INTEL_IOMMU_SVM
1579 if (info->pri_enabled) {
1580 pci_disable_pri(pdev);
1581 info->pri_enabled = 0;
1583 if (info->pasid_enabled) {
1584 pci_disable_pasid(pdev);
1585 info->pasid_enabled = 0;
1590 static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
1591 u64 addr, unsigned int mask)
1595 if (!info || !info->ats_enabled)
1598 sid = info->bus << 8 | info->devfn;
1599 qdep = info->ats_qdep;
1600 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
1604 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1605 u64 addr, unsigned mask)
1607 unsigned long flags;
1608 struct device_domain_info *info;
1609 struct subdev_domain_info *sinfo;
1611 if (!domain->has_iotlb_device)
1614 spin_lock_irqsave(&device_domain_lock, flags);
1615 list_for_each_entry(info, &domain->devices, link)
1616 __iommu_flush_dev_iotlb(info, addr, mask);
1618 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
1619 info = get_domain_info(sinfo->pdev);
1620 __iommu_flush_dev_iotlb(info, addr, mask);
1622 spin_unlock_irqrestore(&device_domain_lock, flags);
1625 static void domain_flush_piotlb(struct intel_iommu *iommu,
1626 struct dmar_domain *domain,
1627 u64 addr, unsigned long npages, bool ih)
1629 u16 did = domain->iommu_did[iommu->seq_id];
1631 if (domain->default_pasid)
1632 qi_flush_piotlb(iommu, did, domain->default_pasid,
1635 if (!list_empty(&domain->devices))
1636 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
1639 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1640 struct dmar_domain *domain,
1641 unsigned long pfn, unsigned int pages,
1644 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1645 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1646 u16 did = domain->iommu_did[iommu->seq_id];
1653 if (domain_use_first_level(domain)) {
1654 domain_flush_piotlb(iommu, domain, addr, pages, ih);
1657 * Fall back to domain-selective flush if there is no PSI support or
1658 * the size is too big. PSI requires the page size to be 2 ^ x
1659 * and the base address to be naturally aligned to the size.
1661 if (!cap_pgsel_inv(iommu->cap) ||
1662 mask > cap_max_amask_val(iommu->cap))
1663 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1666 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1671 * In caching mode, changes of pages from non-present to present require
1672 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1674 if (!cap_caching_mode(iommu->cap) || !map)
1675 iommu_flush_dev_iotlb(domain, addr, mask);
1678 /* Notification for newly created mappings */
1679 static inline void __mapping_notify_one(struct intel_iommu *iommu,
1680 struct dmar_domain *domain,
1681 unsigned long pfn, unsigned int pages)
1684 * It's a non-present to present mapping. Only flush if caching mode
1687 if (cap_caching_mode(iommu->cap) && !domain_use_first_level(domain))
1688 iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
1690 iommu_flush_write_buffer(iommu);
1693 static void intel_flush_iotlb_all(struct iommu_domain *domain)
1695 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
1698 for_each_domain_iommu(idx, dmar_domain) {
1699 struct intel_iommu *iommu = g_iommus[idx];
1700 u16 did = dmar_domain->iommu_did[iommu->seq_id];
1702 if (domain_use_first_level(dmar_domain))
1703 domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0);
1705 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1708 if (!cap_caching_mode(iommu->cap))
1709 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1710 0, MAX_AGAW_PFN_WIDTH);
1714 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1717 unsigned long flags;
1719 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1722 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1723 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1724 pmen &= ~DMA_PMEN_EPM;
1725 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1727 /* wait for the protected region status bit to clear */
1728 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1729 readl, !(pmen & DMA_PMEN_PRS), pmen);
1731 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1734 static void iommu_enable_translation(struct intel_iommu *iommu)
1737 unsigned long flags;
1739 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1740 iommu->gcmd |= DMA_GCMD_TE;
1741 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1743 /* Make sure hardware completes it */
1744 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1745 readl, (sts & DMA_GSTS_TES), sts);
1747 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1750 static void iommu_disable_translation(struct intel_iommu *iommu)
1755 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
1756 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
1759 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1760 iommu->gcmd &= ~DMA_GCMD_TE;
1761 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1763 /* Make sure hardware completes it */
1764 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1765 readl, (!(sts & DMA_GSTS_TES)), sts);
1767 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1770 static int iommu_init_domains(struct intel_iommu *iommu)
1772 u32 ndomains, nlongs;
1775 ndomains = cap_ndoms(iommu->cap);
1776 pr_debug("%s: Number of Domains supported <%d>\n",
1777 iommu->name, ndomains);
1778 nlongs = BITS_TO_LONGS(ndomains);
1780 spin_lock_init(&iommu->lock);
1782 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1783 if (!iommu->domain_ids) {
1784 pr_err("%s: Allocating domain id array failed\n",
1789 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1790 iommu->domains = kzalloc(size, GFP_KERNEL);
1792 if (iommu->domains) {
1793 size = 256 * sizeof(struct dmar_domain *);
1794 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1797 if (!iommu->domains || !iommu->domains[0]) {
1798 pr_err("%s: Allocating domain array failed\n",
1800 kfree(iommu->domain_ids);
1801 kfree(iommu->domains);
1802 iommu->domain_ids = NULL;
1803 iommu->domains = NULL;
1808 * If Caching mode is set, then invalid translations are tagged
1809 * with domain-id 0, hence we need to pre-allocate it. We also
1810 * use domain-id 0 as a marker for non-allocated domain-id, so
1811 * make sure it is not used for a real domain.
1813 set_bit(0, iommu->domain_ids);
1816 * VT-d spec rev 3.0 (section 6.2.3.1) requires that each pasid
1817 * entry for first-level or pass-through translation modes should
1818 * be programmed with a domain id different from those used for
1819 * second-level or nested translation. We reserve a domain id for
1822 if (sm_supported(iommu))
1823 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
1828 static void disable_dmar_iommu(struct intel_iommu *iommu)
1830 struct device_domain_info *info, *tmp;
1831 unsigned long flags;
1833 if (!iommu->domains || !iommu->domain_ids)
1836 spin_lock_irqsave(&device_domain_lock, flags);
1837 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1838 if (info->iommu != iommu)
1841 if (!info->dev || !info->domain)
1844 __dmar_remove_one_dev_info(info);
1846 spin_unlock_irqrestore(&device_domain_lock, flags);
1848 if (iommu->gcmd & DMA_GCMD_TE)
1849 iommu_disable_translation(iommu);
1852 static void free_dmar_iommu(struct intel_iommu *iommu)
1854 if ((iommu->domains) && (iommu->domain_ids)) {
1855 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1858 for (i = 0; i < elems; i++)
1859 kfree(iommu->domains[i]);
1860 kfree(iommu->domains);
1861 kfree(iommu->domain_ids);
1862 iommu->domains = NULL;
1863 iommu->domain_ids = NULL;
1866 g_iommus[iommu->seq_id] = NULL;
1868 /* free context mapping */
1869 free_context_table(iommu);
1871 #ifdef CONFIG_INTEL_IOMMU_SVM
1872 if (pasid_supported(iommu)) {
1873 if (ecap_prs(iommu->ecap))
1874 intel_svm_finish_prq(iommu);
1876 if (vccap_pasid(iommu->vccap))
1877 ioasid_unregister_allocator(&iommu->pasid_allocator);
1883 * Check and return whether first level is used by default for
1886 static bool first_level_by_default(void)
1888 return scalable_mode_support() && intel_cap_flts_sanity();
1891 static struct dmar_domain *alloc_domain(int flags)
1893 struct dmar_domain *domain;
1895 domain = alloc_domain_mem();
1899 memset(domain, 0, sizeof(*domain));
1900 domain->nid = NUMA_NO_NODE;
1901 domain->flags = flags;
1902 if (first_level_by_default())
1903 domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
1904 domain->has_iotlb_device = false;
1905 INIT_LIST_HEAD(&domain->devices);
1906 INIT_LIST_HEAD(&domain->subdevices);
1911 /* Must be called with iommu->lock */
1912 static int domain_attach_iommu(struct dmar_domain *domain,
1913 struct intel_iommu *iommu)
1915 unsigned long ndomains;
1918 assert_spin_locked(&device_domain_lock);
1919 assert_spin_locked(&iommu->lock);
1921 domain->iommu_refcnt[iommu->seq_id] += 1;
1922 domain->iommu_count += 1;
1923 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1924 ndomains = cap_ndoms(iommu->cap);
1925 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1927 if (num >= ndomains) {
1928 pr_err("%s: No free domain ids\n", iommu->name);
1929 domain->iommu_refcnt[iommu->seq_id] -= 1;
1930 domain->iommu_count -= 1;
1934 set_bit(num, iommu->domain_ids);
1935 set_iommu_domain(iommu, num, domain);
1937 domain->iommu_did[iommu->seq_id] = num;
1938 domain->nid = iommu->node;
1940 domain_update_iommu_cap(domain);
1946 static int domain_detach_iommu(struct dmar_domain *domain,
1947 struct intel_iommu *iommu)
1951 assert_spin_locked(&device_domain_lock);
1952 assert_spin_locked(&iommu->lock);
1954 domain->iommu_refcnt[iommu->seq_id] -= 1;
1955 count = --domain->iommu_count;
1956 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1957 num = domain->iommu_did[iommu->seq_id];
1958 clear_bit(num, iommu->domain_ids);
1959 set_iommu_domain(iommu, num, NULL);
1961 domain_update_iommu_cap(domain);
1962 domain->iommu_did[iommu->seq_id] = 0;
1968 static inline int guestwidth_to_adjustwidth(int gaw)
1971 int r = (gaw - 12) % 9;
1982 static void domain_exit(struct dmar_domain *domain)
1985 /* Remove associated devices and clear attached or cached domains */
1986 domain_remove_dev_info(domain);
1989 if (domain->domain.type == IOMMU_DOMAIN_DMA)
1990 iommu_put_dma_cookie(&domain->domain);
1993 struct page *freelist;
1995 freelist = domain_unmap(domain, 0,
1996 DOMAIN_MAX_PFN(domain->gaw), NULL);
1997 dma_free_pagelist(freelist);
2000 free_domain_mem(domain);
2004 * Get the PASID directory size for a scalable mode context entry.
2005 * A value of X in the PDTS field of a scalable mode context entry
2006 * indicates a PASID directory with 2^(X + 7) entries.
2008 static inline unsigned long context_get_sm_pds(struct pasid_table *table)
2012 max_pde = table->max_pasid >> PASID_PDE_SHIFT;
2013 pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
2021 * Set the RID_PASID field of a scalable mode context entry. The
2022 * IOMMU hardware will use the PASID value set in this field for
2023 * DMA translations of DMA requests without PASID.
2026 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
2028 context->hi |= pasid & ((1 << 20) - 1);
2032 * Set the DTE(Device-TLB Enable) field of a scalable mode context
2035 static inline void context_set_sm_dte(struct context_entry *context)
2037 context->lo |= (1 << 2);
2041 * Set the PRE(Page Request Enable) field of a scalable mode context
2044 static inline void context_set_sm_pre(struct context_entry *context)
2046 context->lo |= (1 << 4);
2049 /* Convert value to context PASID directory size field coding. */
2050 #define context_pdts(pds) (((pds) & 0x7) << 9)
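/*
 * Sketch of how the scalable-mode helpers above combine (mirrors the
 * sm_supported() branch of domain_context_mapping_one() below):
 *
 *   pds = context_get_sm_pds(table);
 *   context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
 *   context_set_sm_rid2pasid(context, PASID_RID2PASID);
 *   if (info && info->ats_supported)
 *           context_set_sm_dte(context);
 *   if (info && info->pri_supported)
 *           context_set_sm_pre(context);
 */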
2052 static int domain_context_mapping_one(struct dmar_domain *domain,
2053 struct intel_iommu *iommu,
2054 struct pasid_table *table,
2057 u16 did = domain->iommu_did[iommu->seq_id];
2058 int translation = CONTEXT_TT_MULTI_LEVEL;
2059 struct device_domain_info *info = NULL;
2060 struct context_entry *context;
2061 unsigned long flags;
2066 if (hw_pass_through && domain_type_is_si(domain))
2067 translation = CONTEXT_TT_PASS_THROUGH;
2069 pr_debug("Set context mapping for %02x:%02x.%d\n",
2070 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2072 BUG_ON(!domain->pgd);
2074 spin_lock_irqsave(&device_domain_lock, flags);
2075 spin_lock(&iommu->lock);
2078 context = iommu_context_addr(iommu, bus, devfn, 1);
2083 if (context_present(context))
2087 * For kdump cases, old valid entries may be cached due to the
2088 * in-flight DMA and copied pgtable, but there is no unmapping
2089 * behaviour for them, thus we need an explicit cache flush for
2090 * the newly-mapped device. For kdump, at this point, the device
2091 * is supposed to finish reset at its driver probe stage, so no
2092 * in-flight DMA will exist, and we don't need to worry anymore
2095 if (context_copied(context)) {
2096 u16 did_old = context_domain_id(context);
2098 if (did_old < cap_ndoms(iommu->cap)) {
2099 iommu->flush.flush_context(iommu, did_old,
2100 (((u16)bus) << 8) | devfn,
2101 DMA_CCMD_MASK_NOBIT,
2102 DMA_CCMD_DEVICE_INVL);
2103 iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2108 context_clear_entry(context);
2110 if (sm_supported(iommu)) {
2115 /* Setup the PASID DIR pointer: */
2116 pds = context_get_sm_pds(table);
2117 context->lo = (u64)virt_to_phys(table->table) |
2120 /* Setup the RID_PASID field: */
2121 context_set_sm_rid2pasid(context, PASID_RID2PASID);
2124 * Setup the Device-TLB enable bit and Page request
2127 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2128 if (info && info->ats_supported)
2129 context_set_sm_dte(context);
2130 if (info && info->pri_supported)
2131 context_set_sm_pre(context);
2133 struct dma_pte *pgd = domain->pgd;
2136 context_set_domain_id(context, did);
2138 if (translation != CONTEXT_TT_PASS_THROUGH) {
2140 * Skip top levels of page tables for iommus which have
2141 * less agaw than the default. Unnecessary for PT mode.
2143 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2145 pgd = phys_to_virt(dma_pte_addr(pgd));
2146 if (!dma_pte_present(pgd))
2150 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2151 if (info && info->ats_supported)
2152 translation = CONTEXT_TT_DEV_IOTLB;
2154 translation = CONTEXT_TT_MULTI_LEVEL;
2156 context_set_address_root(context, virt_to_phys(pgd));
2157 context_set_address_width(context, agaw);
2160 * In pass through mode, AW must be programmed to
2161 * indicate the largest AGAW value supported by
2162 * hardware. And ASR is ignored by hardware.
2164 context_set_address_width(context, iommu->msagaw);
2167 context_set_translation_type(context, translation);
2170 context_set_fault_enable(context);
2171 context_set_present(context);
2172 if (!ecap_coherent(iommu->ecap))
2173 clflush_cache_range(context, sizeof(*context));
2176 * It's a non-present to present mapping. If hardware doesn't cache
2177 * non-present entries we only need to flush the write-buffer. If it
2178 * _does_ cache non-present entries, then it does so in the special
2179 * domain #0, which we have to flush:
2181 if (cap_caching_mode(iommu->cap)) {
2182 iommu->flush.flush_context(iommu, 0,
2183 (((u16)bus) << 8) | devfn,
2184 DMA_CCMD_MASK_NOBIT,
2185 DMA_CCMD_DEVICE_INVL);
2186 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2188 iommu_flush_write_buffer(iommu);
2190 iommu_enable_dev_iotlb(info);
2195 spin_unlock(&iommu->lock);
2196 spin_unlock_irqrestore(&device_domain_lock, flags);
2201 struct domain_context_mapping_data {
2202 struct dmar_domain *domain;
2203 struct intel_iommu *iommu;
2204 struct pasid_table *table;
2207 static int domain_context_mapping_cb(struct pci_dev *pdev,
2208 u16 alias, void *opaque)
2210 struct domain_context_mapping_data *data = opaque;
2212 return domain_context_mapping_one(data->domain, data->iommu,
2213 data->table, PCI_BUS_NUM(alias),
2218 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2220 struct domain_context_mapping_data data;
2221 struct pasid_table *table;
2222 struct intel_iommu *iommu;
2225 iommu = device_to_iommu(dev, &bus, &devfn);
2229 table = intel_pasid_get_table(dev);
2231 if (!dev_is_pci(dev))
2232 return domain_context_mapping_one(domain, iommu, table,
2235 data.domain = domain;
2239 return pci_for_each_dma_alias(to_pci_dev(dev),
2240 &domain_context_mapping_cb, &data);
2243 static int domain_context_mapped_cb(struct pci_dev *pdev,
2244 u16 alias, void *opaque)
2246 struct intel_iommu *iommu = opaque;
2248 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2251 static int domain_context_mapped(struct device *dev)
2253 struct intel_iommu *iommu;
2256 iommu = device_to_iommu(dev, &bus, &devfn);
2260 if (!dev_is_pci(dev))
2261 return device_context_mapped(iommu, bus, devfn);
2263 return !pci_for_each_dma_alias(to_pci_dev(dev),
2264 domain_context_mapped_cb, iommu);
2267 /* Returns the number of VT-d pages, but aligned to the MM page size */
2268 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2271 host_addr &= ~PAGE_MASK;
2272 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
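/*
 * Example, assuming 4KiB MM pages: a 0x2000-byte buffer starting at
 * host_addr 0x1234 keeps offset 0x234; PAGE_ALIGN(0x234 + 0x2000) is
 * 0x3000, so the mapping spans 3 VT-d pages even though the length
 * alone is only two pages.
 */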
2275 /* Return largest possible superpage level for a given mapping */
2276 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2277 unsigned long iov_pfn,
2278 unsigned long phy_pfn,
2279 unsigned long pages)
2281 int support, level = 1;
2282 unsigned long pfnmerge;
2284 support = domain->iommu_superpage;
2286 /* To use a large page, the virtual *and* physical addresses
2287 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2288 of them will mean we have to use smaller pages. So just
2289 merge them and check both at once. */
2290 pfnmerge = iov_pfn | phy_pfn;
2292 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2293 pages >>= VTD_STRIDE_SHIFT;
2296 pfnmerge >>= VTD_STRIDE_SHIFT;
2304 * Ensure that old small page tables are removed to make room for superpage(s).
2305 * We're going to add new large pages, so make sure we don't remove their parent
2306 * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
2308 static void switch_to_super_page(struct dmar_domain *domain,
2309 unsigned long start_pfn,
2310 unsigned long end_pfn, int level)
2312 unsigned long lvl_pages = lvl_to_nr_pages(level);
2313 struct dma_pte *pte = NULL;
2316 while (start_pfn <= end_pfn) {
2318 pte = pfn_to_dma_pte(domain, start_pfn, &level);
2320 if (dma_pte_present(pte)) {
2321 dma_pte_free_pagetable(domain, start_pfn,
2322 start_pfn + lvl_pages - 1,
2325 for_each_domain_iommu(i, domain)
2326 iommu_flush_iotlb_psi(g_iommus[i], domain,
2327 start_pfn, lvl_pages,
2332 start_pfn += lvl_pages;
2333 if (first_pte_in_page(pte))
2339 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2340 unsigned long phys_pfn, unsigned long nr_pages, int prot)
2342 unsigned int largepage_lvl = 0;
2343 unsigned long lvl_pages = 0;
2344 struct dma_pte *pte = NULL;
2348 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2350 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2353 attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
2354 attr |= DMA_FL_PTE_PRESENT;
2355 if (domain_use_first_level(domain)) {
2356 attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
2358 if (domain->domain.type == IOMMU_DOMAIN_DMA) {
2359 attr |= DMA_FL_PTE_ACCESS;
2360 if (prot & DMA_PTE_WRITE)
2361 attr |= DMA_FL_PTE_DIRTY;
2365 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
2367 while (nr_pages > 0) {
2371 largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
2372 phys_pfn, nr_pages);
2374 pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2377 /* It is a large page */
2378 if (largepage_lvl > 1) {
2379 unsigned long end_pfn;
2381 pteval |= DMA_PTE_LARGE_PAGE;
2382 end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
2383 switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
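/*
 * Illustrative example, assuming level_mask(2) clears the nine
 * page-offset bits of that level: for iov_pfn = 0x200 and
 * nr_pages = 0x400 at largepage_lvl = 2, end_pfn becomes
 * (0x600 & ~0x1ff) - 1 = 0x5ff, so the 2MiB-aligned ranges
 * [0x200, 0x3ff] and [0x400, 0x5ff] have any old small-page
 * tables freed before the large PTEs are written below.
 */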
2385 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2389 /* We don't need a lock here; nobody else
2390 * touches the iova range
2392 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2394 static int dumps = 5;
2395 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2396 iov_pfn, tmp, (unsigned long long)pteval);
2399 debug_dma_dump_mappings(NULL);
2404 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2406 BUG_ON(nr_pages < lvl_pages);
2408 nr_pages -= lvl_pages;
2409 iov_pfn += lvl_pages;
2410 phys_pfn += lvl_pages;
2411 pteval += lvl_pages * VTD_PAGE_SIZE;
2413 /* If the next PTE would be the first in a new page, then we
2414 * need to flush the cache on the entries we've just written.
2415 * And then we'll need to recalculate 'pte', so clear it and
2416 * let it get set again in the if (!pte) block above.
2418 * If we're done (!nr_pages) we need to flush the cache too.
2420 * Also if we've been setting superpages, we may need to
2421 * recalculate 'pte' and switch back to smaller pages for the
2422 * end of the mapping, if the trailing size is not enough to
2423 * use another superpage (i.e. nr_pages < lvl_pages).
2425 * We leave clflush for the leaf pte changes to iotlb_sync_map()
2429 if (!nr_pages || first_pte_in_page(pte) ||
2430 (largepage_lvl > 1 && nr_pages < lvl_pages))
2437 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2439 unsigned long flags;
2440 struct context_entry *context;
2446 spin_lock_irqsave(&iommu->lock, flags);
2447 context = iommu_context_addr(iommu, bus, devfn, 0);
2449 spin_unlock_irqrestore(&iommu->lock, flags);
2452 did_old = context_domain_id(context);
2453 context_clear_entry(context);
2454 __iommu_flush_cache(iommu, context, sizeof(*context));
2455 spin_unlock_irqrestore(&iommu->lock, flags);
2456 iommu->flush.flush_context(iommu,
2458 (((u16)bus) << 8) | devfn,
2459 DMA_CCMD_MASK_NOBIT,
2460 DMA_CCMD_DEVICE_INVL);
2462 if (sm_supported(iommu))
2463 qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
2465 iommu->flush.flush_iotlb(iommu,
2472 static inline void unlink_domain_info(struct device_domain_info *info)
2474 assert_spin_locked(&device_domain_lock);
2475 list_del(&info->link);
2476 list_del(&info->global);
2478 dev_iommu_priv_set(info->dev, NULL);
2481 static void domain_remove_dev_info(struct dmar_domain *domain)
2483 struct device_domain_info *info, *tmp;
2484 unsigned long flags;
2486 spin_lock_irqsave(&device_domain_lock, flags);
2487 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2488 __dmar_remove_one_dev_info(info);
2489 spin_unlock_irqrestore(&device_domain_lock, flags);
2492 struct dmar_domain *find_domain(struct device *dev)
2494 struct device_domain_info *info;
2496 if (unlikely(!dev || !dev->iommu))
2499 if (unlikely(attach_deferred(dev)))
2502 /* No lock here, assumes no domain exit in normal case */
2503 info = get_domain_info(dev);
2505 return info->domain;
2510 static inline struct device_domain_info *
2511 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2513 struct device_domain_info *info;
2515 list_for_each_entry(info, &device_domain_list, global)
2516 if (info->segment == segment && info->bus == bus &&
2517 info->devfn == devfn)
2523 static int domain_setup_first_level(struct intel_iommu *iommu,
2524 struct dmar_domain *domain,
2528 int flags = PASID_FLAG_SUPERVISOR_MODE;
2529 struct dma_pte *pgd = domain->pgd;
2533 * Skip top levels of page tables for an iommu which has
2534 * a smaller agaw than the default. Unnecessary for PT mode.
2536 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
2537 pgd = phys_to_virt(dma_pte_addr(pgd));
2538 if (!dma_pte_present(pgd))
2542 level = agaw_to_level(agaw);
2543 if (level != 4 && level != 5)
2546 flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
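/*
 * For example, a 4-level (48-bit) first-level page table resolves to
 * level 4, while a 5-level (57-bit) table resolves to level 5 and
 * additionally sets PASID_FLAG_FL5LP so the PASID entry enables 5-level
 * paging; any other depth is rejected by the level check above.
 */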
2548 if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
2549 flags |= PASID_FLAG_PAGE_SNOOP;
2551 return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
2552 domain->iommu_did[iommu->seq_id],
2556 static bool dev_is_real_dma_subdevice(struct device *dev)
2558 return dev && dev_is_pci(dev) &&
2559 pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
2562 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2565 struct dmar_domain *domain)
2567 struct dmar_domain *found = NULL;
2568 struct device_domain_info *info;
2569 unsigned long flags;
2572 info = alloc_devinfo_mem();
2576 if (!dev_is_real_dma_subdevice(dev)) {
2578 info->devfn = devfn;
2579 info->segment = iommu->segment;
2581 struct pci_dev *pdev = to_pci_dev(dev);
2583 info->bus = pdev->bus->number;
2584 info->devfn = pdev->devfn;
2585 info->segment = pci_domain_nr(pdev->bus);
2588 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2589 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2592 info->domain = domain;
2593 info->iommu = iommu;
2594 info->pasid_table = NULL;
2595 info->auxd_enabled = 0;
2596 INIT_LIST_HEAD(&info->subdevices);
2598 if (dev && dev_is_pci(dev)) {
2599 struct pci_dev *pdev = to_pci_dev(info->dev);
2601 if (ecap_dev_iotlb_support(iommu->ecap) &&
2602 pci_ats_supported(pdev) &&
2603 dmar_find_matched_atsr_unit(pdev))
2604 info->ats_supported = 1;
2606 if (sm_supported(iommu)) {
2607 if (pasid_supported(iommu)) {
2608 int features = pci_pasid_features(pdev);
2610 info->pasid_supported = features | 1;
2613 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2614 pci_pri_supported(pdev))
2615 info->pri_supported = 1;
2619 spin_lock_irqsave(&device_domain_lock, flags);
2621 found = find_domain(dev);
2624 struct device_domain_info *info2;
2625 info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
2628 found = info2->domain;
2634 spin_unlock_irqrestore(&device_domain_lock, flags);
2635 free_devinfo_mem(info);
2636 /* Caller must free the original domain */
2640 spin_lock(&iommu->lock);
2641 ret = domain_attach_iommu(domain, iommu);
2642 spin_unlock(&iommu->lock);
2645 spin_unlock_irqrestore(&device_domain_lock, flags);
2646 free_devinfo_mem(info);
2650 list_add(&info->link, &domain->devices);
2651 list_add(&info->global, &device_domain_list);
2653 dev_iommu_priv_set(dev, info);
2654 spin_unlock_irqrestore(&device_domain_lock, flags);
2656 /* PASID table is mandatory for a PCI device in scalable mode. */
2657 if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
2658 ret = intel_pasid_alloc_table(dev);
2660 dev_err(dev, "PASID table allocation failed\n");
2661 dmar_remove_one_dev_info(dev);
2665 /* Setup the PASID entry for requests without PASID: */
2666 spin_lock_irqsave(&iommu->lock, flags);
2667 if (hw_pass_through && domain_type_is_si(domain))
2668 ret = intel_pasid_setup_pass_through(iommu, domain,
2669 dev, PASID_RID2PASID);
2670 else if (domain_use_first_level(domain))
2671 ret = domain_setup_first_level(iommu, domain, dev,
2674 ret = intel_pasid_setup_second_level(iommu, domain,
2675 dev, PASID_RID2PASID);
2676 spin_unlock_irqrestore(&iommu->lock, flags);
2678 dev_err(dev, "Setup RID2PASID failed\n");
2679 dmar_remove_one_dev_info(dev);
2684 if (dev && domain_context_mapping(domain, dev)) {
2685 dev_err(dev, "Domain context map failed\n");
2686 dmar_remove_one_dev_info(dev);
2693 static int iommu_domain_identity_map(struct dmar_domain *domain,
2694 unsigned long first_vpfn,
2695 unsigned long last_vpfn)
2698 * The RMRR range might overlap with the physical memory range,
2701 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2703 return __domain_mapping(domain, first_vpfn,
2704 first_vpfn, last_vpfn - first_vpfn + 1,
2705 DMA_PTE_READ|DMA_PTE_WRITE);
2708 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2710 static int __init si_domain_init(int hw)
2712 struct dmar_rmrr_unit *rmrr;
2716 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2720 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2721 domain_exit(si_domain);
2728 for_each_online_node(nid) {
2729 unsigned long start_pfn, end_pfn;
2732 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2733 ret = iommu_domain_identity_map(si_domain,
2734 mm_to_dma_pfn(start_pfn),
2735 mm_to_dma_pfn(end_pfn));
2742 * Identity map the RMRRs so that devices with RMRRs could also use
2745 for_each_rmrr_units(rmrr) {
2746 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
2748 unsigned long long start = rmrr->base_address;
2749 unsigned long long end = rmrr->end_address;
2751 if (WARN_ON(end < start ||
2752 end >> agaw_to_width(si_domain->agaw)))
2755 ret = iommu_domain_identity_map(si_domain,
2756 mm_to_dma_pfn(start >> PAGE_SHIFT),
2757 mm_to_dma_pfn(end >> PAGE_SHIFT));
2766 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2768 struct dmar_domain *ndomain;
2769 struct intel_iommu *iommu;
2772 iommu = device_to_iommu(dev, &bus, &devfn);
2776 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2777 if (ndomain != domain)
2783 static bool device_has_rmrr(struct device *dev)
2785 struct dmar_rmrr_unit *rmrr;
2790 for_each_rmrr_units(rmrr) {
2792 * Return TRUE if this RMRR contains the device that
2795 for_each_active_dev_scope(rmrr->devices,
2796 rmrr->devices_cnt, i, tmp)
2798 is_downstream_to_pci_bridge(dev, tmp)) {
2808 * device_rmrr_is_relaxable - Test whether the RMRR of this device
2809 * is relaxable (i.e. it is allowed to be left unenforced under some conditions)
2810 * @dev: device handle
2812 * We assume that PCI USB devices with RMRRs have them largely
2813 * for historical reasons and that the RMRR space is not actively used post
2814 * boot. This exclusion may change if vendors begin to abuse it.
2816 * The same exception is made for graphics devices, with the requirement that
2817 * any use of the RMRR regions will be torn down before assigning the device
2820 * Return: true if the RMRR is relaxable, false otherwise
2822 static bool device_rmrr_is_relaxable(struct device *dev)
2824 struct pci_dev *pdev;
2826 if (!dev_is_pci(dev))
2829 pdev = to_pci_dev(dev);
2830 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2837 * There are a couple of cases where we need to restrict the functionality of
2838 * devices associated with RMRRs. The first is when evaluating a device for
2839 * identity mapping because problems exist when devices are moved in and out
2840 * of domains and their respective RMRR information is lost. This means that
2841 * a device with associated RMRRs will never be in a "passthrough" domain.
2842 * The second is use of the device through the IOMMU API. This interface
2843 * expects to have full control of the IOVA space for the device. We cannot
2844 * satisfy both the requirement that RMRR access is maintained and have an
2845 * unencumbered IOVA space. We also have no ability to quiesce the device's
2846 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2847 * We therefore prevent devices associated with an RMRR from participating in
2848 * the IOMMU API, which eliminates them from device assignment.
2850 * In both cases, devices which have relaxable RMRRs are not concerned by this
2851 * restriction. See device_rmrr_is_relaxable comment.
2853 static bool device_is_rmrr_locked(struct device *dev)
2855 if (!device_has_rmrr(dev))
2858 if (device_rmrr_is_relaxable(dev))
2865 * Return the required default domain type for a specific device.
2867 * @dev: the device in query
2871 * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
2872 * - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
2873 * - 0: both identity and dynamic domains work for this device
2875 static int device_def_domain_type(struct device *dev)
2877 if (dev_is_pci(dev)) {
2878 struct pci_dev *pdev = to_pci_dev(dev);
2880 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2881 return IOMMU_DOMAIN_IDENTITY;
2883 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2884 return IOMMU_DOMAIN_IDENTITY;
2890 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2893 * Start from a sane IOMMU hardware state.
2894 * If queued invalidation was already initialized by us
2895 * (for example, while enabling interrupt remapping), then
2896 * things are already rolling from a sane state.
2900 * Clear any previous faults.
2902 dmar_fault(-1, iommu);
2904 * Disable queued invalidation if supported and already enabled
2905 * before OS handover.
2907 dmar_disable_qi(iommu);
2910 if (dmar_enable_qi(iommu)) {
2912 * Queued invalidation is not enabled; use register-based invalidation
2914 iommu->flush.flush_context = __iommu_flush_context;
2915 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2916 pr_info("%s: Using Register based invalidation\n",
2919 iommu->flush.flush_context = qi_flush_context;
2920 iommu->flush.flush_iotlb = qi_flush_iotlb;
2921 pr_info("%s: Using Queued invalidation\n", iommu->name);
2925 static int copy_context_table(struct intel_iommu *iommu,
2926 struct root_entry *old_re,
2927 struct context_entry **tbl,
2930 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2931 struct context_entry *new_ce = NULL, ce;
2932 struct context_entry *old_ce = NULL;
2933 struct root_entry re;
2934 phys_addr_t old_ce_phys;
2936 tbl_idx = ext ? bus * 2 : bus;
2937 memcpy(&re, old_re, sizeof(re));
2939 for (devfn = 0; devfn < 256; devfn++) {
2940 /* First calculate the correct index */
2941 idx = (ext ? devfn * 2 : devfn) % 256;
2944 /* First save what we may have and clean up */
2946 tbl[tbl_idx] = new_ce;
2947 __iommu_flush_cache(iommu, new_ce,
2957 old_ce_phys = root_entry_lctp(&re);
2959 old_ce_phys = root_entry_uctp(&re);
2962 if (ext && devfn == 0) {
2963 /* No LCTP, try UCTP */
2972 old_ce = memremap(old_ce_phys, PAGE_SIZE,
2977 new_ce = alloc_pgtable_page(iommu->node);
2984 /* Now copy the context entry */
2985 memcpy(&ce, old_ce + idx, sizeof(ce));
2987 if (!__context_present(&ce))
2990 did = context_domain_id(&ce);
2991 if (did >= 0 && did < cap_ndoms(iommu->cap))
2992 set_bit(did, iommu->domain_ids);
2995 * We need a marker for copied context entries. This
2996 * marker needs to work for the old format as well as
2997 * for extended context entries.
2999 * Bit 67 of the context entry is used. In the old
3000 * format this bit is available to software, in the
3001 * extended format it is the PGE bit, but PGE is ignored
3002 * by HW if PASIDs are disabled (and thus still
3005 * So disable PASIDs first and then mark the entry
3006 * copied. This means that we don't copy PASID
3007 * translations from the old kernel, but this is fine as
3008 * faults there are not fatal.
3010 context_clear_pasid_enable(&ce);
3011 context_set_copied(&ce);
3016 tbl[tbl_idx + pos] = new_ce;
3018 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3027 static int copy_translation_tables(struct intel_iommu *iommu)
3029 struct context_entry **ctxt_tbls;
3030 struct root_entry *old_rt;
3031 phys_addr_t old_rt_phys;
3032 int ctxt_table_entries;
3033 unsigned long flags;
3038 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3039 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3040 new_ext = !!ecap_ecs(iommu->ecap);
3043 * The RTT bit can only be changed when translation is disabled,
3044 * but disabling translation means opening a window for data
3045 * corruption. So bail out and don't copy anything if we would
3046 * have to change the bit.
3051 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3055 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3059 /* This is too big for the stack - allocate it from slab */
3060 ctxt_table_entries = ext ? 512 : 256;
3062 ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
3066 for (bus = 0; bus < 256; bus++) {
3067 ret = copy_context_table(iommu, &old_rt[bus],
3068 ctxt_tbls, bus, ext);
3070 pr_err("%s: Failed to copy context table for bus %d\n",
3076 spin_lock_irqsave(&iommu->lock, flags);
3078 /* Context tables are copied, now write them to the root_entry table */
3079 for (bus = 0; bus < 256; bus++) {
3080 int idx = ext ? bus * 2 : bus;
3083 if (ctxt_tbls[idx]) {
3084 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3085 iommu->root_entry[bus].lo = val;
3088 if (!ext || !ctxt_tbls[idx + 1])
3091 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3092 iommu->root_entry[bus].hi = val;
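/*
 * Illustrative layout: with the extended (ext) format each bus is backed
 * by two copied context tables, e.g. bus 3 uses ctxt_tbls[6] for devfn
 * 0-127 (written to the root entry's lo slot) and ctxt_tbls[7] for devfn
 * 128-255 (the hi slot); with the legacy format only ctxt_tbls[bus]
 * exists and lands in the lo slot.
 */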
3095 spin_unlock_irqrestore(&iommu->lock, flags);
3099 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3109 #ifdef CONFIG_INTEL_IOMMU_SVM
3110 static ioasid_t intel_vcmd_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
3112 struct intel_iommu *iommu = data;
3116 return INVALID_IOASID;
3118 * The VT-d virtual command interface always uses the full 20-bit
3119 * PASID range. The host can partition the guest PASID range based on
3120 * its policies, but that is out of the guest's control.
3122 if (min < PASID_MIN || max > intel_pasid_max_id)
3123 return INVALID_IOASID;
3125 if (vcmd_alloc_pasid(iommu, &ioasid))
3126 return INVALID_IOASID;
3131 static void intel_vcmd_ioasid_free(ioasid_t ioasid, void *data)
3133 struct intel_iommu *iommu = data;
3138 * The sanity check of the ioasid owner is done at the upper layer, e.g. VFIO.
3139 * We can only free the PASID when all the devices are unbound.
3141 if (ioasid_find(NULL, ioasid, NULL)) {
3142 pr_alert("Cannot free active IOASID %d\n", ioasid);
3145 vcmd_free_pasid(iommu, ioasid);
3148 static void register_pasid_allocator(struct intel_iommu *iommu)
3151 * If we are running in the host, there is no need for a custom allocator
3152 * because PASIDs are allocated from the host system-wide.
3154 if (!cap_caching_mode(iommu->cap))
3157 if (!sm_supported(iommu)) {
3158 pr_warn("VT-d Scalable Mode not enabled, no PASID allocation\n");
3163 * Register a custom PASID allocator if we are running in a guest;
3164 * guest PASIDs must be obtained via the virtual command interface.
3165 * There can be multiple vIOMMUs in each guest but only one allocator
3166 * is active. All vIOMMU allocators will eventually be calling the same
3169 if (!vccap_pasid(iommu->vccap))
3172 pr_info("Register custom PASID allocator\n");
3173 iommu->pasid_allocator.alloc = intel_vcmd_ioasid_alloc;
3174 iommu->pasid_allocator.free = intel_vcmd_ioasid_free;
3175 iommu->pasid_allocator.pdata = (void *)iommu;
3176 if (ioasid_register_allocator(&iommu->pasid_allocator)) {
3177 pr_warn("Custom PASID allocator failed, scalable mode disabled\n");
3179 * Disable scalable mode on this IOMMU if there
3180 * is no custom allocator. Mixing SM-capable vIOMMUs
3181 * and non-SM vIOMMUs is not supported.
3188 static int __init init_dmars(void)
3190 struct dmar_drhd_unit *drhd;
3191 struct intel_iommu *iommu;
3197 * initialize and program root entry to not present
3200 for_each_drhd_unit(drhd) {
3202 * lock not needed as this is only incremented in the single-
3203 * threaded kernel __init code path; all other accesses are read
3206 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3210 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3213 /* Preallocate enough resources for IOMMU hot-addition */
3214 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3215 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3217 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3220 pr_err("Allocating global iommu array failed\n");
3225 ret = intel_cap_audit(CAP_AUDIT_STATIC_DMAR, NULL);
3229 for_each_iommu(iommu, drhd) {
3230 if (drhd->ignored) {
3231 iommu_disable_translation(iommu);
3236 * Find the max pasid size of all IOMMUs in the system.
3237 * We need to ensure the system pasid table is no bigger
3238 * than the smallest supported.
3240 if (pasid_supported(iommu)) {
3241 u32 temp = 2 << ecap_pss(iommu->ecap);
3243 intel_pasid_max_id = min_t(u32, temp,
3244 intel_pasid_max_id);
3247 g_iommus[iommu->seq_id] = iommu;
3249 intel_iommu_init_qi(iommu);
3251 ret = iommu_init_domains(iommu);
3255 init_translation_status(iommu);
3257 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3258 iommu_disable_translation(iommu);
3259 clear_translation_pre_enabled(iommu);
3260 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3266 * we could share the same root & context tables
3267 * among all IOMMUs; need to split it later.
3269 ret = iommu_alloc_root_entry(iommu);
3273 if (translation_pre_enabled(iommu)) {
3274 pr_info("Translation already enabled - trying to copy translation structures\n");
3276 ret = copy_translation_tables(iommu);
3279 * We found the IOMMU with translation
3280 * enabled - but failed to copy over the
3281 * old root-entry table. Try to proceed
3282 * by disabling translation now and
3283 * allocating a clean root-entry table.
3284 * This might cause DMAR faults, but
3285 * probably the dump will still succeed.
3287 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3289 iommu_disable_translation(iommu);
3290 clear_translation_pre_enabled(iommu);
3292 pr_info("Copied translation tables from previous kernel for %s\n",
3297 if (!ecap_pass_through(iommu->ecap))
3298 hw_pass_through = 0;
3299 intel_svm_check(iommu);
3303 * Now that qi is enabled on all iommus, set the root entry and flush
3304 * caches. This is required on some Intel X58 chipsets, otherwise the
3305 * flush_context function will loop forever and the boot hangs.
3307 for_each_active_iommu(iommu, drhd) {
3308 iommu_flush_write_buffer(iommu);
3309 #ifdef CONFIG_INTEL_IOMMU_SVM
3310 register_pasid_allocator(iommu);
3312 iommu_set_root_entry(iommu);
3315 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3320 iommu_identity_mapping |= IDENTMAP_GFX;
3322 check_tylersburg_isoch();
3324 ret = si_domain_init(hw_pass_through);
3331 * global invalidate context cache
3332 * global invalidate iotlb
3333 * enable translation
3335 for_each_iommu(iommu, drhd) {
3336 if (drhd->ignored) {
3338 * we always have to disable PMRs or DMA may fail on
3342 iommu_disable_protect_mem_regions(iommu);
3346 iommu_flush_write_buffer(iommu);
3348 #ifdef CONFIG_INTEL_IOMMU_SVM
3349 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3351 * Calling dmar_alloc_hwirq() with dmar_global_lock held
3352 * could cause a possible lock race condition.
3354 up_write(&dmar_global_lock);
3355 ret = intel_svm_enable_prq(iommu);
3356 down_write(&dmar_global_lock);
3361 ret = dmar_set_interrupt(iommu);
3369 for_each_active_iommu(iommu, drhd) {
3370 disable_dmar_iommu(iommu);
3371 free_dmar_iommu(iommu);
3380 static inline int iommu_domain_cache_init(void)
3384 iommu_domain_cache = kmem_cache_create("iommu_domain",
3385 sizeof(struct dmar_domain),
3390 if (!iommu_domain_cache) {
3391 pr_err("Couldn't create iommu_domain cache\n");
3398 static inline int iommu_devinfo_cache_init(void)
3402 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3403 sizeof(struct device_domain_info),
3407 if (!iommu_devinfo_cache) {
3408 pr_err("Couldn't create devinfo cache\n");
3415 static int __init iommu_init_mempool(void)
3418 ret = iova_cache_get();
3422 ret = iommu_domain_cache_init();
3426 ret = iommu_devinfo_cache_init();
3430 kmem_cache_destroy(iommu_domain_cache);
3437 static void __init iommu_exit_mempool(void)
3439 kmem_cache_destroy(iommu_devinfo_cache);
3440 kmem_cache_destroy(iommu_domain_cache);
3444 static void __init init_no_remapping_devices(void)
3446 struct dmar_drhd_unit *drhd;
3450 for_each_drhd_unit(drhd) {
3451 if (!drhd->include_all) {
3452 for_each_active_dev_scope(drhd->devices,
3453 drhd->devices_cnt, i, dev)
3455 /* ignore DMAR unit if no devices exist */
3456 if (i == drhd->devices_cnt)
3461 for_each_active_drhd_unit(drhd) {
3462 if (drhd->include_all)
3465 for_each_active_dev_scope(drhd->devices,
3466 drhd->devices_cnt, i, dev)
3467 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
3469 if (i < drhd->devices_cnt)
3472 /* This IOMMU has *only* gfx devices. Either bypass it or
3473 set the gfx_mapped flag, as appropriate */
3474 drhd->gfx_dedicated = 1;
3480 #ifdef CONFIG_SUSPEND
3481 static int init_iommu_hw(void)
3483 struct dmar_drhd_unit *drhd;
3484 struct intel_iommu *iommu = NULL;
3486 for_each_active_iommu(iommu, drhd)
3488 dmar_reenable_qi(iommu);
3490 for_each_iommu(iommu, drhd) {
3491 if (drhd->ignored) {
3493 * we always have to disable PMRs or DMA may fail on
3497 iommu_disable_protect_mem_regions(iommu);
3501 iommu_flush_write_buffer(iommu);
3502 iommu_set_root_entry(iommu);
3503 iommu_enable_translation(iommu);
3504 iommu_disable_protect_mem_regions(iommu);
3510 static void iommu_flush_all(void)
3512 struct dmar_drhd_unit *drhd;
3513 struct intel_iommu *iommu;
3515 for_each_active_iommu(iommu, drhd) {
3516 iommu->flush.flush_context(iommu, 0, 0, 0,
3517 DMA_CCMD_GLOBAL_INVL);
3518 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3519 DMA_TLB_GLOBAL_FLUSH);
3523 static int iommu_suspend(void)
3525 struct dmar_drhd_unit *drhd;
3526 struct intel_iommu *iommu = NULL;
3529 for_each_active_iommu(iommu, drhd) {
3530 iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
3532 if (!iommu->iommu_state)
3538 for_each_active_iommu(iommu, drhd) {
3539 iommu_disable_translation(iommu);
3541 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3543 iommu->iommu_state[SR_DMAR_FECTL_REG] =
3544 readl(iommu->reg + DMAR_FECTL_REG);
3545 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
3546 readl(iommu->reg + DMAR_FEDATA_REG);
3547 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
3548 readl(iommu->reg + DMAR_FEADDR_REG);
3549 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
3550 readl(iommu->reg + DMAR_FEUADDR_REG);
3552 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3557 for_each_active_iommu(iommu, drhd)
3558 kfree(iommu->iommu_state);
3563 static void iommu_resume(void)
3565 struct dmar_drhd_unit *drhd;
3566 struct intel_iommu *iommu = NULL;
3569 if (init_iommu_hw()) {
3571 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
3573 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
3577 for_each_active_iommu(iommu, drhd) {
3579 raw_spin_lock_irqsave(&iommu->register_lock, flag);
3581 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
3582 iommu->reg + DMAR_FECTL_REG);
3583 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
3584 iommu->reg + DMAR_FEDATA_REG);
3585 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
3586 iommu->reg + DMAR_FEADDR_REG);
3587 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
3588 iommu->reg + DMAR_FEUADDR_REG);
3590 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
3593 for_each_active_iommu(iommu, drhd)
3594 kfree(iommu->iommu_state);
3597 static struct syscore_ops iommu_syscore_ops = {
3598 .resume = iommu_resume,
3599 .suspend = iommu_suspend,
3602 static void __init init_iommu_pm_ops(void)
3604 register_syscore_ops(&iommu_syscore_ops);
3608 static inline void init_iommu_pm_ops(void) {}
3609 #endif /* CONFIG_PM */
3611 static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
3613 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
3614 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
3615 rmrr->end_address <= rmrr->base_address ||
3616 arch_rmrr_sanity_check(rmrr))
3622 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3624 struct acpi_dmar_reserved_memory *rmrr;
3625 struct dmar_rmrr_unit *rmrru;
3627 rmrr = (struct acpi_dmar_reserved_memory *)header;
3628 if (rmrr_sanity_check(rmrr)) {
3630 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
3631 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
3632 rmrr->base_address, rmrr->end_address,
3633 dmi_get_system_info(DMI_BIOS_VENDOR),
3634 dmi_get_system_info(DMI_BIOS_VERSION),
3635 dmi_get_system_info(DMI_PRODUCT_VERSION));
3636 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
3639 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
3643 rmrru->hdr = header;
3645 rmrru->base_address = rmrr->base_address;
3646 rmrru->end_address = rmrr->end_address;
3648 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
3649 ((void *)rmrr) + rmrr->header.length,
3650 &rmrru->devices_cnt);
3651 if (rmrru->devices_cnt && rmrru->devices == NULL)
3654 list_add(&rmrru->list, &dmar_rmrr_units);
3663 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3665 struct dmar_atsr_unit *atsru;
3666 struct acpi_dmar_atsr *tmp;
3668 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
3670 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3671 if (atsr->segment != tmp->segment)
3673 if (atsr->header.length != tmp->header.length)
3675 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3682 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3684 struct acpi_dmar_atsr *atsr;
3685 struct dmar_atsr_unit *atsru;
3687 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
3690 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3691 atsru = dmar_find_atsr(atsr);
3695 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3700 * If memory is allocated from slab by ACPI _DSM method, we need to
3701 * copy the memory content because the memory buffer will be freed
3704 atsru->hdr = (void *)(atsru + 1);
3705 memcpy(atsru->hdr, hdr, hdr->length);
3706 atsru->include_all = atsr->flags & 0x1;
3707 if (!atsru->include_all) {
3708 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
3709 (void *)atsr + atsr->header.length,
3710 &atsru->devices_cnt);
3711 if (atsru->devices_cnt && atsru->devices == NULL) {
3717 list_add_rcu(&atsru->list, &dmar_atsr_units);
3722 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3724 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
3728 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3730 struct acpi_dmar_atsr *atsr;
3731 struct dmar_atsr_unit *atsru;
3733 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3734 atsru = dmar_find_atsr(atsr);
3736 list_del_rcu(&atsru->list);
3738 intel_iommu_free_atsr(atsru);
3744 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3748 struct acpi_dmar_atsr *atsr;
3749 struct dmar_atsr_unit *atsru;
3751 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3752 atsru = dmar_find_atsr(atsr);
3756 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
3757 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3765 static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
3767 struct dmar_satc_unit *satcu;
3768 struct acpi_dmar_satc *tmp;
3770 list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
3772 tmp = (struct acpi_dmar_satc *)satcu->hdr;
3773 if (satc->segment != tmp->segment)
3775 if (satc->header.length != tmp->header.length)
3777 if (memcmp(satc, tmp, satc->header.length) == 0)
3784 int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
3786 struct acpi_dmar_satc *satc;
3787 struct dmar_satc_unit *satcu;
3789 if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
3792 satc = container_of(hdr, struct acpi_dmar_satc, header);
3793 satcu = dmar_find_satc(satc);
3797 satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
3801 satcu->hdr = (void *)(satcu + 1);
3802 memcpy(satcu->hdr, hdr, hdr->length);
3803 satcu->atc_required = satc->flags & 0x1;
3804 satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
3805 (void *)satc + satc->header.length,
3806 &satcu->devices_cnt);
3807 if (satcu->devices_cnt && !satcu->devices) {
3811 list_add_rcu(&satcu->list, &dmar_satc_units);
3816 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3819 struct intel_iommu *iommu = dmaru->iommu;
3821 if (g_iommus[iommu->seq_id])
3824 ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_DMAR, iommu);
3828 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3829 pr_warn("%s: Doesn't support hardware pass through.\n",
3833 if (!ecap_sc_support(iommu->ecap) &&
3834 domain_update_iommu_snooping(iommu)) {
3835 pr_warn("%s: Doesn't support snooping.\n",
3839 sp = domain_update_iommu_superpage(NULL, iommu) - 1;
3840 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3841 pr_warn("%s: Doesn't support large page.\n",
3847 * Disable translation if already enabled prior to OS handover.
3849 if (iommu->gcmd & DMA_GCMD_TE)
3850 iommu_disable_translation(iommu);
3852 g_iommus[iommu->seq_id] = iommu;
3853 ret = iommu_init_domains(iommu);
3855 ret = iommu_alloc_root_entry(iommu);
3859 intel_svm_check(iommu);
3861 if (dmaru->ignored) {
3863 * we always have to disable PMRs or DMA may fail on this device
3866 iommu_disable_protect_mem_regions(iommu);
3870 intel_iommu_init_qi(iommu);
3871 iommu_flush_write_buffer(iommu);
3873 #ifdef CONFIG_INTEL_IOMMU_SVM
3874 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
3875 ret = intel_svm_enable_prq(iommu);
3880 ret = dmar_set_interrupt(iommu);
3884 iommu_set_root_entry(iommu);
3885 iommu_enable_translation(iommu);
3887 iommu_disable_protect_mem_regions(iommu);
3891 disable_dmar_iommu(iommu);
3893 free_dmar_iommu(iommu);
3897 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3900 struct intel_iommu *iommu = dmaru->iommu;
3902 if (!intel_iommu_enabled)
3908 ret = intel_iommu_add(dmaru);
3910 disable_dmar_iommu(iommu);
3911 free_dmar_iommu(iommu);
3917 static void intel_iommu_free_dmars(void)
3919 struct dmar_rmrr_unit *rmrru, *rmrr_n;
3920 struct dmar_atsr_unit *atsru, *atsr_n;
3921 struct dmar_satc_unit *satcu, *satc_n;
3923 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
3924 list_del(&rmrru->list);
3925 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
3929 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
3930 list_del(&atsru->list);
3931 intel_iommu_free_atsr(atsru);
3933 list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
3934 list_del(&satcu->list);
3935 dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
3940 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
3943 struct pci_bus *bus;
3944 struct pci_dev *bridge = NULL;
3946 struct acpi_dmar_atsr *atsr;
3947 struct dmar_atsr_unit *atsru;
3949 dev = pci_physfn(dev);
3950 for (bus = dev->bus; bus; bus = bus->parent) {
3952 /* If it's an integrated device, allow ATS */
3955 /* Connected via non-PCIe: no ATS */
3956 if (!pci_is_pcie(bridge) ||
3957 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
3959 /* If we found the root port, look it up in the ATSR */
3960 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
3965 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3966 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
3967 if (atsr->segment != pci_domain_nr(dev->bus))
3970 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
3971 if (tmp == &bridge->dev)
3974 if (atsru->include_all)
3984 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3987 struct dmar_rmrr_unit *rmrru;
3988 struct dmar_atsr_unit *atsru;
3989 struct dmar_satc_unit *satcu;
3990 struct acpi_dmar_atsr *atsr;
3991 struct acpi_dmar_reserved_memory *rmrr;
3992 struct acpi_dmar_satc *satc;
3994 if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
3997 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
3998 rmrr = container_of(rmrru->hdr,
3999 struct acpi_dmar_reserved_memory, header);
4000 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4001 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4002 ((void *)rmrr) + rmrr->header.length,
4003 rmrr->segment, rmrru->devices,
4004 rmrru->devices_cnt);
4007 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4008 dmar_remove_dev_scope(info, rmrr->segment,
4009 rmrru->devices, rmrru->devices_cnt);
4013 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4014 if (atsru->include_all)
4017 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4018 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4019 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4020 (void *)atsr + atsr->header.length,
4021 atsr->segment, atsru->devices,
4022 atsru->devices_cnt);
4027 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4028 if (dmar_remove_dev_scope(info, atsr->segment,
4029 atsru->devices, atsru->devices_cnt))
4033 list_for_each_entry(satcu, &dmar_satc_units, list) {
4034 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
4035 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4036 ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
4037 (void *)satc + satc->header.length,
4038 satc->segment, satcu->devices,
4039 satcu->devices_cnt);
4044 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4045 if (dmar_remove_dev_scope(info, satc->segment,
4046 satcu->devices, satcu->devices_cnt))
4054 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4055 unsigned long val, void *v)
4057 struct memory_notify *mhp = v;
4058 unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4059 unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
4063 case MEM_GOING_ONLINE:
4064 if (iommu_domain_identity_map(si_domain,
4065 start_vpfn, last_vpfn)) {
4066 pr_warn("Failed to build identity map for [%lx-%lx]\n",
4067 start_vpfn, last_vpfn);
4073 case MEM_CANCEL_ONLINE:
4075 struct dmar_drhd_unit *drhd;
4076 struct intel_iommu *iommu;
4077 struct page *freelist;
4079 freelist = domain_unmap(si_domain,
4080 start_vpfn, last_vpfn,
4084 for_each_active_iommu(iommu, drhd)
4085 iommu_flush_iotlb_psi(iommu, si_domain,
4086 start_vpfn, mhp->nr_pages,
4089 dma_free_pagelist(freelist);
4097 static struct notifier_block intel_iommu_memory_nb = {
4098 .notifier_call = intel_iommu_memory_notifier,
4102 static void intel_disable_iommus(void)
4104 struct intel_iommu *iommu = NULL;
4105 struct dmar_drhd_unit *drhd;
4107 for_each_iommu(iommu, drhd)
4108 iommu_disable_translation(iommu);
4111 void intel_iommu_shutdown(void)
4113 struct dmar_drhd_unit *drhd;
4114 struct intel_iommu *iommu = NULL;
4116 if (no_iommu || dmar_disabled)
4119 down_write(&dmar_global_lock);
4121 /* Disable PMRs explicitly here. */
4122 for_each_iommu(iommu, drhd)
4123 iommu_disable_protect_mem_regions(iommu);
4125 /* Make sure the IOMMUs are switched off */
4126 intel_disable_iommus();
4128 up_write(&dmar_global_lock);
4131 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
4133 struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
4135 return container_of(iommu_dev, struct intel_iommu, iommu);
4138 static ssize_t intel_iommu_show_version(struct device *dev,
4139 struct device_attribute *attr,
4142 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4143 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4144 return sprintf(buf, "%d:%d\n",
4145 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4147 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4149 static ssize_t intel_iommu_show_address(struct device *dev,
4150 struct device_attribute *attr,
4153 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4154 return sprintf(buf, "%llx\n", iommu->reg_phys);
4156 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4158 static ssize_t intel_iommu_show_cap(struct device *dev,
4159 struct device_attribute *attr,
4162 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4163 return sprintf(buf, "%llx\n", iommu->cap);
4165 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4167 static ssize_t intel_iommu_show_ecap(struct device *dev,
4168 struct device_attribute *attr,
4171 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4172 return sprintf(buf, "%llx\n", iommu->ecap);
4174 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4176 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4177 struct device_attribute *attr,
4180 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4181 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4183 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4185 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4186 struct device_attribute *attr,
4189 struct intel_iommu *iommu = dev_to_intel_iommu(dev);
4190 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4191 cap_ndoms(iommu->cap)));
4193 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4195 static struct attribute *intel_iommu_attrs[] = {
4196 &dev_attr_version.attr,
4197 &dev_attr_address.attr,
4199 &dev_attr_ecap.attr,
4200 &dev_attr_domains_supported.attr,
4201 &dev_attr_domains_used.attr,
4205 static struct attribute_group intel_iommu_group = {
4206 .name = "intel-iommu",
4207 .attrs = intel_iommu_attrs,
4210 const struct attribute_group *intel_iommu_groups[] = {
4215 static inline bool has_external_pci(void)
4217 struct pci_dev *pdev = NULL;
4219 for_each_pci_dev(pdev)
4220 if (pdev->external_facing)
4226 static int __init platform_optin_force_iommu(void)
4228 if (!dmar_platform_optin() || no_platform_optin || !has_external_pci())
4231 if (no_iommu || dmar_disabled)
4232 pr_info("Intel-IOMMU force enabled due to platform opt in\n");
4235 * If Intel-IOMMU is disabled by default, we will apply identity
4236 * map for all devices except those marked as being untrusted.
4239 iommu_set_default_passthrough(false);
4247 static int __init probe_acpi_namespace_devices(void)
4249 struct dmar_drhd_unit *drhd;
4250 /* To avoid a -Wunused-but-set-variable warning. */
4251 struct intel_iommu *iommu __maybe_unused;
4255 for_each_active_iommu(iommu, drhd) {
4256 for_each_active_dev_scope(drhd->devices,
4257 drhd->devices_cnt, i, dev) {
4258 struct acpi_device_physical_node *pn;
4259 struct iommu_group *group;
4260 struct acpi_device *adev;
4262 if (dev->bus != &acpi_bus_type)
4265 adev = to_acpi_device(dev);
4266 mutex_lock(&adev->physical_node_lock);
4267 list_for_each_entry(pn,
4268 &adev->physical_node_list, node) {
4269 group = iommu_group_get(pn->dev);
4271 iommu_group_put(group);
4275 pn->dev->bus->iommu_ops = &intel_iommu_ops;
4276 ret = iommu_probe_device(pn->dev);
4280 mutex_unlock(&adev->physical_node_lock);
4290 int __init intel_iommu_init(void)
4293 struct dmar_drhd_unit *drhd;
4294 struct intel_iommu *iommu;
4297 * Intel IOMMU is required for a TXT/tboot launch or platform
4298 * opt in, so enforce that.
4300 force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
4301 platform_optin_force_iommu();
4303 if (iommu_init_mempool()) {
4305 panic("tboot: Failed to initialize iommu memory\n");
4309 down_write(&dmar_global_lock);
4310 if (dmar_table_init()) {
4312 panic("tboot: Failed to initialize DMAR table\n");
4316 if (dmar_dev_scope_init() < 0) {
4318 panic("tboot: Failed to initialize DMAR device scope\n");
4322 up_write(&dmar_global_lock);
4325 * The bus notifier takes the dmar_global_lock, so lockdep will
4326 * complain later when we register it under the lock.
4328 dmar_register_bus_notifier();
4330 down_write(&dmar_global_lock);
4333 intel_iommu_debugfs_init();
4335 if (no_iommu || dmar_disabled) {
4337 * We exit the function here to ensure the IOMMU's remapping and
4338 * mempool aren't set up, which means that the IOMMU's PMRs
4339 * won't be disabled via the call to init_dmars(). So disable
4340 * them explicitly here. The PMRs were set up by tboot prior to
4341 * calling SENTER, but the kernel is expected to reset/tear
4344 if (intel_iommu_tboot_noforce) {
4345 for_each_iommu(iommu, drhd)
4346 iommu_disable_protect_mem_regions(iommu);
4350 * Make sure the IOMMUs are switched off, even when we
4351 * boot into a kexec kernel and the previous kernel left
4354 intel_disable_iommus();
4358 if (list_empty(&dmar_rmrr_units))
4359 pr_info("No RMRR found\n");
4361 if (list_empty(&dmar_atsr_units))
4362 pr_info("No ATSR found\n");
4364 if (list_empty(&dmar_satc_units))
4365 pr_info("No SATC found\n");
4368 intel_iommu_gfx_mapped = 1;
4370 init_no_remapping_devices();
4375 panic("tboot: Failed to initialize DMARs\n");
4376 pr_err("Initialization failed\n");
4379 up_write(&dmar_global_lock);
4381 init_iommu_pm_ops();
4383 down_read(&dmar_global_lock);
4384 for_each_active_iommu(iommu, drhd) {
4386 * The flush queue implementation does not perform
4387 * page-selective invalidations that are required for efficient
4388 * TLB flushes in virtual environments. The benefit of batching
4389 * is likely to be much lower than the overhead of synchronizing
4390 * the virtual and physical IOMMU page-tables.
4392 if (!intel_iommu_strict && cap_caching_mode(iommu->cap)) {
4393 pr_warn("IOMMU batching is disabled due to virtualization");
4394 intel_iommu_strict = 1;
4396 iommu_device_sysfs_add(&iommu->iommu, NULL,
4399 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
4401 up_read(&dmar_global_lock);
4403 iommu_set_dma_strict(intel_iommu_strict);
4404 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4405 if (si_domain && !hw_pass_through)
4406 register_memory_notifier(&intel_iommu_memory_nb);
4408 down_read(&dmar_global_lock);
4409 if (probe_acpi_namespace_devices())
4410 pr_warn("ACPI name space devices didn't probe correctly\n");
4412 /* Finally, we enable the DMA remapping hardware. */
4413 for_each_iommu(iommu, drhd) {
4414 if (!drhd->ignored && !translation_pre_enabled(iommu))
4415 iommu_enable_translation(iommu);
4417 iommu_disable_protect_mem_regions(iommu);
4419 up_read(&dmar_global_lock);
4421 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4423 intel_iommu_enabled = 1;
4428 intel_iommu_free_dmars();
4429 up_write(&dmar_global_lock);
4430 iommu_exit_mempool();
4434 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4436 struct intel_iommu *iommu = opaque;
4438 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4443 * NB - intel-iommu lacks any sort of reference counting for the users of
4444 * dependent devices. If multiple endpoints have intersecting dependent
4445 * devices, unbinding the driver from any one of them will possibly leave
4446 * the others unable to operate.
4448 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4450 if (!iommu || !dev || !dev_is_pci(dev))
4453 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4456 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4458 struct dmar_domain *domain;
4459 struct intel_iommu *iommu;
4460 unsigned long flags;
4462 assert_spin_locked(&device_domain_lock);
4467 iommu = info->iommu;
4468 domain = info->domain;
4471 if (dev_is_pci(info->dev) && sm_supported(iommu))
4472 intel_pasid_tear_down_entry(iommu, info->dev,
4473 PASID_RID2PASID, false);
4475 iommu_disable_dev_iotlb(info);
4476 if (!dev_is_real_dma_subdevice(info->dev))
4477 domain_context_clear(iommu, info->dev);
4478 intel_pasid_free_table(info->dev);
4481 unlink_domain_info(info);
4483 spin_lock_irqsave(&iommu->lock, flags);
4484 domain_detach_iommu(domain, iommu);
4485 spin_unlock_irqrestore(&iommu->lock, flags);
4487 free_devinfo_mem(info);
4490 static void dmar_remove_one_dev_info(struct device *dev)
4492 struct device_domain_info *info;
4493 unsigned long flags;
4495 spin_lock_irqsave(&device_domain_lock, flags);
4496 info = get_domain_info(dev);
4498 __dmar_remove_one_dev_info(info);
4499 spin_unlock_irqrestore(&device_domain_lock, flags);
4502 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4506 /* calculate AGAW */
4507 domain->gaw = guest_width;
4508 adjust_width = guestwidth_to_adjustwidth(guest_width);
4509 domain->agaw = width_to_agaw(adjust_width);
4511 domain->iommu_coherency = 0;
4512 domain->iommu_snooping = 0;
4513 domain->iommu_superpage = 0;
4514 domain->max_addr = 0;
4516 /* always allocate the top pgd */
4517 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4520 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4524 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4526 struct dmar_domain *dmar_domain;
4527 struct iommu_domain *domain;
4530 case IOMMU_DOMAIN_DMA:
4531 case IOMMU_DOMAIN_UNMANAGED:
4532 dmar_domain = alloc_domain(0);
4534 pr_err("Can't allocate dmar_domain\n");
4537 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4538 pr_err("Domain initialization failed\n");
4539 domain_exit(dmar_domain);
4543 if (type == IOMMU_DOMAIN_DMA &&
4544 iommu_get_dma_cookie(&dmar_domain->domain))
4547 domain = &dmar_domain->domain;
4548 domain->geometry.aperture_start = 0;
4549 domain->geometry.aperture_end =
4550 __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4551 domain->geometry.force_aperture = true;
4554 case IOMMU_DOMAIN_IDENTITY:
4555 return &si_domain->domain;
4563 static void intel_iommu_domain_free(struct iommu_domain *domain)
4565 if (domain != &si_domain->domain)
4566 domain_exit(to_dmar_domain(domain));
4570 * Check whether a @domain could be attached to the @dev through the
4571 * aux-domain attach/detach APIs.
4574 is_aux_domain(struct device *dev, struct iommu_domain *domain)
4576 struct device_domain_info *info = get_domain_info(dev);
4578 return info && info->auxd_enabled &&
4579 domain->type == IOMMU_DOMAIN_UNMANAGED;
4582 static inline struct subdev_domain_info *
4583 lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
4585 struct subdev_domain_info *sinfo;
4587 if (!list_empty(&domain->subdevices)) {
4588 list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
4589 if (sinfo->pdev == dev)
4597 static int auxiliary_link_device(struct dmar_domain *domain,
4600 struct device_domain_info *info = get_domain_info(dev);
4601 struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
4603 assert_spin_locked(&device_domain_lock);
4608 sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
4609 sinfo->domain = domain;
4611 list_add(&sinfo->link_phys, &info->subdevices);
4612 list_add(&sinfo->link_domain, &domain->subdevices);
4615 return ++sinfo->users;
4618 static int auxiliary_unlink_device(struct dmar_domain *domain,
4621 struct device_domain_info *info = get_domain_info(dev);
4622 struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
4625 assert_spin_locked(&device_domain_lock);
4626 if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
4629 ret = --sinfo->users;
4631 list_del(&sinfo->link_phys);
4632 list_del(&sinfo->link_domain);
4639 static int aux_domain_add_dev(struct dmar_domain *domain,
4643 unsigned long flags;
4644 struct intel_iommu *iommu;
4646 iommu = device_to_iommu(dev, NULL, NULL);
4650 if (domain->default_pasid <= 0) {
4653 /* No private data needed for the default pasid */
4654 pasid = ioasid_alloc(NULL, PASID_MIN,
4655 pci_max_pasids(to_pci_dev(dev)) - 1,
4657 if (pasid == INVALID_IOASID) {
4658 pr_err("Can't allocate default pasid\n");
4661 domain->default_pasid = pasid;
4664 spin_lock_irqsave(&device_domain_lock, flags);
4665 ret = auxiliary_link_device(domain, dev);
4670 * Subdevices from the same physical device can be attached to the
4671 * same domain. For such cases, only the first subdevice attachment
4672 * needs to go through the full steps in this function. So if ret >
4679 * iommu->lock must be held to attach domain to iommu and setup the
4680 * pasid entry for second level translation.
4682 spin_lock(&iommu->lock);
4683 ret = domain_attach_iommu(domain, iommu);
4687 /* Setup the PASID entry for mediated devices: */
4688 if (domain_use_first_level(domain))
4689 ret = domain_setup_first_level(iommu, domain, dev,
4690 domain->default_pasid);
4692 ret = intel_pasid_setup_second_level(iommu, domain, dev,
4693 domain->default_pasid);
4697 spin_unlock(&iommu->lock);
4699 spin_unlock_irqrestore(&device_domain_lock, flags);
4704 domain_detach_iommu(domain, iommu);
4706 spin_unlock(&iommu->lock);
4707 auxiliary_unlink_device(domain, dev);
4709 spin_unlock_irqrestore(&device_domain_lock, flags);
4710 if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
4711 ioasid_put(domain->default_pasid);
4716 static void aux_domain_remove_dev(struct dmar_domain *domain,
4719 struct device_domain_info *info;
4720 struct intel_iommu *iommu;
4721 unsigned long flags;
4723 if (!is_aux_domain(dev, &domain->domain))
4726 spin_lock_irqsave(&device_domain_lock, flags);
4727 info = get_domain_info(dev);
4728 iommu = info->iommu;
4730 if (!auxiliary_unlink_device(domain, dev)) {
4731 spin_lock(&iommu->lock);
4732 intel_pasid_tear_down_entry(iommu, dev,
4733 domain->default_pasid, false);
4734 domain_detach_iommu(domain, iommu);
4735 spin_unlock(&iommu->lock);
4738 spin_unlock_irqrestore(&device_domain_lock, flags);
4740 if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
4741 ioasid_put(domain->default_pasid);
4744 static int prepare_domain_attach_device(struct iommu_domain *domain,
4747 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4748 struct intel_iommu *iommu;
4751 iommu = device_to_iommu(dev, NULL, NULL);
4755 /* check if this iommu agaw is sufficient for max mapped address */
4756 addr_width = agaw_to_width(iommu->agaw);
4757 if (addr_width > cap_mgaw(iommu->cap))
4758 addr_width = cap_mgaw(iommu->cap);
4760 if (dmar_domain->max_addr > (1LL << addr_width)) {
4761 dev_err(dev, "%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
4763 __func__, addr_width, dmar_domain->max_addr);
4766 dmar_domain->gaw = addr_width;
4769 * Knock out extra levels of page tables if necessary
4771 while (iommu->agaw < dmar_domain->agaw) {
4772 struct dma_pte *pte;
4774 pte = dmar_domain->pgd;
4775 if (dma_pte_present(pte)) {
4776 dmar_domain->pgd = (struct dma_pte *)
4777 phys_to_virt(dma_pte_addr(pte));
4778 free_pgtable_page(pte);
4780 dmar_domain->agaw--;
4786 static int intel_iommu_attach_device(struct iommu_domain *domain,
4791 if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
4792 device_is_rmrr_locked(dev)) {
4793 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
4797 if (is_aux_domain(dev, domain))
4800 /* normally dev is not mapped */
4801 if (unlikely(domain_context_mapped(dev))) {
4802 struct dmar_domain *old_domain;
4804 old_domain = find_domain(dev);
4806 dmar_remove_one_dev_info(dev);
4809 ret = prepare_domain_attach_device(domain, dev);
4813 return domain_add_dev_info(to_dmar_domain(domain), dev);
4816 static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
4821 if (!is_aux_domain(dev, domain))
4824 ret = prepare_domain_attach_device(domain, dev);
4828 return aux_domain_add_dev(to_dmar_domain(domain), dev);
4831 static void intel_iommu_detach_device(struct iommu_domain *domain,
4834 dmar_remove_one_dev_info(dev);
4837 static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
4840 aux_domain_remove_dev(to_dmar_domain(domain), dev);
4843 #ifdef CONFIG_INTEL_IOMMU_SVM
4845 * 2D array for converting and sanitizing IOMMU generic TLB granularity to
4846 * VT-d granularity. Invalidation is typically included in the unmap operation
4847 * as a result of a DMA or VFIO unmap. However, for assigned devices the guest
4848 * owns the first level page tables. Invalidations of translation caches in the
4849 * guest are trapped and passed down to the host.
4851 * The vIOMMU in the guest will only expose first-level page tables, therefore
4852 * we do not support IOTLB granularity for requests without PASID (second level).
4854 * For example, to find the VT-d granularity encoding for IOTLB
4855 * type and page selective granularity within PASID:
4856 * X: indexed by iommu cache type
4857 * Y: indexed by enum iommu_inv_granularity
4858 * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
4862 inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
4864 * PASID based IOTLB invalidation: PASID selective (per PASID),
4865 * page selective (address granularity)
4867 {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
4868 /* PASID based dev TLBs */
4869 {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
4871 {-EINVAL, -EINVAL, -EINVAL}
4874 static inline int to_vtd_granularity(int type, int granu)
4876 return inv_type_granu_table[type][granu];
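/*
 * Example lookups against the table above: an IOTLB invalidation with
 * address-selective granularity maps to QI_GRAN_PSI_PASID (page selective
 * within PASID), an IOTLB invalidation with PASID-selective granularity
 * maps to QI_GRAN_NONG_PASID, and the remaining combinations resolve to
 * -EINVAL and are skipped by the caller with an error message.
 */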
4879 static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
4881 u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
4883 /* VT-d size is encoded as 2^size of 4K pages, 0 for 4k, 9 for 2MB, etc.
4884 * The IOMMU cache invalidate API passes granu_size in bytes, and the
4885 * number of granules of that size in contiguous memory.
4887 return order_base_2(nr_pages);
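/*
 * Worked example: granu_size = 4KiB with nr_granules = 512 gives
 * nr_pages = 512 and returns 9 (the 2MiB encoding), while a single
 * 4KiB granule gives nr_pages = 1 and returns 0.
 */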
4891 intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
4892 struct iommu_cache_invalidate_info *inv_info)
4894 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4895 struct device_domain_info *info;
4896 struct intel_iommu *iommu;
4897 unsigned long flags;
4904 if (!inv_info || !dmar_domain)
4907 if (!dev || !dev_is_pci(dev))
4910 iommu = device_to_iommu(dev, &bus, &devfn);
4914 if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
4917 spin_lock_irqsave(&device_domain_lock, flags);
4918 spin_lock(&iommu->lock);
4919 info = get_domain_info(dev);
4924 did = dmar_domain->iommu_did[iommu->seq_id];
4925 sid = PCI_DEVID(bus, devfn);
4927 /* Size is only valid in address selective invalidation */
4928 if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
4929 size = to_vtd_size(inv_info->granu.addr_info.granule_size,
4930 inv_info->granu.addr_info.nb_granules);
4932 for_each_set_bit(cache_type,
4933 (unsigned long *)&inv_info->cache,
4934 IOMMU_CACHE_INV_TYPE_NR) {
4939 granu = to_vtd_granularity(cache_type, inv_info->granularity);
4940 if (granu == -EINVAL) {
4941 pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
4942 cache_type, inv_info->granularity);
4947 * PASID is stored in different locations based on the
4950 if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
4951 (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
4952 pasid = inv_info->granu.pasid_info.pasid;
4953 else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
4954 (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
4955 pasid = inv_info->granu.addr_info.pasid;
4957 switch (BIT(cache_type)) {
4958 case IOMMU_CACHE_INV_TYPE_IOTLB:
4959 /* HW will ignore LSB bits based on address mask */
4960 if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
4962 (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
4963 pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
4964 inv_info->granu.addr_info.addr, size);
4968 * If granu is PASID-selective, address is ignored.
4969 * We use npages = -1 to indicate that.
4971 qi_flush_piotlb(iommu, did, pasid,
4972 mm_to_dma_pfn(inv_info->granu.addr_info.addr),
4973 (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
4974 inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
4976 if (!info->ats_enabled)
4979 * Always flush the device IOTLB if ATS is enabled. A vIOMMU in
4980 * the guest may assume the IOTLB flush is inclusive of the
4981 * device IOTLB, which is more efficient.
4984 case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
4986 * PASID based device TLB invalidation does not support
4987 * IOMMU_INV_GRANU_PASID granularity; it only supports
4988 * IOMMU_INV_GRANU_ADDR.
4989 * The equivalent is to set the size to cover the entire 64-bit
4990 * address range (an order of 64 - VTD_PAGE_SHIFT 4K pages). The
4991 * user only provides PASID info without address info, so addr is set to 0.
4993 if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
4994 size = 64 - VTD_PAGE_SHIFT;
4996 } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
4997 addr = inv_info->granu.addr_info.addr;
5000 if (info->ats_enabled)
5001 qi_flush_dev_iotlb_pasid(iommu, sid,
5003 info->ats_qdep, addr,
5006 pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
5009 dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
5015 spin_unlock(&iommu->lock);
5016 spin_unlock_irqrestore(&device_domain_lock, flags);
5022 static int intel_iommu_map(struct iommu_domain *domain,
5023 unsigned long iova, phys_addr_t hpa,
5024 size_t size, int iommu_prot, gfp_t gfp)
5026 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5030 if (iommu_prot & IOMMU_READ)
5031 prot |= DMA_PTE_READ;
5032 if (iommu_prot & IOMMU_WRITE)
5033 prot |= DMA_PTE_WRITE;
5034 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5035 prot |= DMA_PTE_SNP;
5037 max_addr = iova + size;
5038 if (dmar_domain->max_addr < max_addr) {
5041 /* check if minimum agaw is sufficient for mapped address */
5042 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5043 if (end < max_addr) {
5044 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
5046 __func__, dmar_domain->gaw, max_addr);
5049 dmar_domain->max_addr = max_addr;
5051 /* Round up size to next multiple of PAGE_SIZE, if it and
5052 the low bits of hpa would take us onto the next page */
5053 size = aligned_nrpages(hpa, size);
5054 return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5055 hpa >> VTD_PAGE_SHIFT, size, prot);
5058 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5059 unsigned long iova, size_t size,
5060 struct iommu_iotlb_gather *gather)
5062 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5063 unsigned long start_pfn, last_pfn;
5066 /* Cope with horrid API which requires us to unmap more than the
5067 size argument if it happens to be a large-page mapping. */
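/*
 * For example, a 4KB unmap request that lands inside a 2MB superpage
 * (a level 2 PTE, level_to_offset_bits(2) == 9) is widened below to
 * VTD_PAGE_SIZE << 9 == 2MB before start_pfn/last_pfn are computed.
 */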
5068 BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5070 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5071 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5073 start_pfn = iova >> VTD_PAGE_SHIFT;
5074 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5076 gather->freelist = domain_unmap(dmar_domain, start_pfn,
5077 last_pfn, gather->freelist);
5079 if (dmar_domain->max_addr == iova + size)
5080 dmar_domain->max_addr = iova;
5082 iommu_iotlb_gather_add_page(domain, gather, iova, size);
5087 static void intel_iommu_tlb_sync(struct iommu_domain *domain,
5088 struct iommu_iotlb_gather *gather)
5090 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5091 unsigned long iova_pfn = IOVA_PFN(gather->start);
5092 size_t size = gather->end - gather->start;
5093 unsigned long start_pfn;
5094 unsigned long nrpages;
5097 nrpages = aligned_nrpages(gather->start, size);
5098 start_pfn = mm_to_dma_pfn(iova_pfn);
5100 for_each_domain_iommu(iommu_id, dmar_domain)
5101 iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
5102 start_pfn, nrpages, !gather->freelist, 0);
5104 dma_free_pagelist(gather->freelist);
5107 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5110 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5111 struct dma_pte *pte;
5115 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5116 if (pte && dma_pte_present(pte))
5117 phys = dma_pte_addr(pte) +
5118 (iova & (BIT_MASK(level_to_offset_bits(level) +
5119 VTD_PAGE_SHIFT) - 1));
5124 static bool intel_iommu_capable(enum iommu_cap cap)
5126 if (cap == IOMMU_CAP_CACHE_COHERENCY)
5127 return domain_update_iommu_snooping(NULL) == 1;
5128 if (cap == IOMMU_CAP_INTR_REMAP)
5129 return irq_remapping_enabled == 1;
5134 static struct iommu_device *intel_iommu_probe_device(struct device *dev)
5136 struct intel_iommu *iommu;
5138 iommu = device_to_iommu(dev, NULL, NULL);
5140 return ERR_PTR(-ENODEV);
5142 if (translation_pre_enabled(iommu))
5143 dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
5145 return &iommu->iommu;
5148 static void intel_iommu_release_device(struct device *dev)
5150 struct intel_iommu *iommu;
5152 iommu = device_to_iommu(dev, NULL, NULL);
5156 dmar_remove_one_dev_info(dev);
5158 set_dma_ops(dev, NULL);
5161 static void intel_iommu_probe_finalize(struct device *dev)
5163 dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
5164 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
5165 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5167 if (domain && domain->type == IOMMU_DOMAIN_DMA)
5168 iommu_setup_dma_ops(dev, base,
5169 __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
5171 set_dma_ops(dev, NULL);
5174 static void intel_iommu_get_resv_regions(struct device *device,
5175 struct list_head *head)
5177 int prot = DMA_PTE_READ | DMA_PTE_WRITE;
5178 struct iommu_resv_region *reg;
5179 struct dmar_rmrr_unit *rmrr;
5180 struct device *i_dev;
5183 down_read(&dmar_global_lock);
5184 for_each_rmrr_units(rmrr) {
5185 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
5187 struct iommu_resv_region *resv;
5188 enum iommu_resv_type type;
5191 if (i_dev != device &&
5192 !is_downstream_to_pci_bridge(device, i_dev))
5195 length = rmrr->end_address - rmrr->base_address + 1;
5197 type = device_rmrr_is_relaxable(device) ?
5198 IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
5200 resv = iommu_alloc_resv_region(rmrr->base_address,
5201 length, prot, type);
5205 list_add_tail(&resv->list, head);
5208 up_read(&dmar_global_lock);
5210 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
5211 if (dev_is_pci(device)) {
5212 struct pci_dev *pdev = to_pci_dev(device);
5214 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
5215 reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
5216 IOMMU_RESV_DIRECT_RELAXABLE);
5218 list_add_tail(&reg->list, head);
5221 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
5223 reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
5224 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
5228 list_add_tail(&reg->list, head);
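/*
 * Enable PASID handling for @dev: set CONTEXT_PASIDE in the device's
 * context entry if it is not already set, invalidate the corresponding
 * context-cache entry, and enable PASID support on the device itself if
 * it was not already enabled.
 */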
5231 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
5233 struct device_domain_info *info;
5234 struct context_entry *context;
5235 struct dmar_domain *domain;
5236 unsigned long flags;
5240 domain = find_domain(dev);
5244 spin_lock_irqsave(&device_domain_lock, flags);
5245 spin_lock(&iommu->lock);
5248 info = get_domain_info(dev);
5249 if (!info || !info->pasid_supported)
5252 context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5253 if (WARN_ON(!context))
5256 ctx_lo = context[0].lo;
5258 if (!(ctx_lo & CONTEXT_PASIDE)) {
5259 ctx_lo |= CONTEXT_PASIDE;
5260 context[0].lo = ctx_lo;
5262 iommu->flush.flush_context(iommu,
5263 domain->iommu_did[iommu->seq_id],
5264 PCI_DEVID(info->bus, info->devfn),
5265 DMA_CCMD_MASK_NOBIT,
5266 DMA_CCMD_DEVICE_INVL);
5269 /* Enable PASID support in the device, if it wasn't already */
5270 if (!info->pasid_enabled)
5271 iommu_enable_dev_iotlb(info);
5276 spin_unlock(&iommu->lock);
5277 spin_unlock_irqrestore(&device_domain_lock, flags);
5282 static struct iommu_group *intel_iommu_device_group(struct device *dev)
5284 if (dev_is_pci(dev))
5285 return pci_device_group(dev);
5286 return generic_device_group(dev);
5289 static int intel_iommu_enable_auxd(struct device *dev)
5291 struct device_domain_info *info;
5292 struct intel_iommu *iommu;
5293 unsigned long flags;
5296 iommu = device_to_iommu(dev, NULL, NULL);
5297 if (!iommu || dmar_disabled)
5300 if (!sm_supported(iommu) || !pasid_supported(iommu))
5303 ret = intel_iommu_enable_pasid(iommu, dev);
5307 spin_lock_irqsave(&device_domain_lock, flags);
5308 info = get_domain_info(dev);
5309 info->auxd_enabled = 1;
5310 spin_unlock_irqrestore(&device_domain_lock, flags);
5315 static int intel_iommu_disable_auxd(struct device *dev)
5317 struct device_domain_info *info;
5318 unsigned long flags;
5320 spin_lock_irqsave(&device_domain_lock, flags);
5321 info = get_domain_info(dev);
5322 if (!WARN_ON(!info))
5323 info->auxd_enabled = 0;
5324 spin_unlock_irqrestore(&device_domain_lock, flags);
5330 * A PCI Express Designated Vendor-Specific Extended Capability (DVSEC) is
5331 * defined in section 3.7 of the Intel Scalable I/O Virtualization technical
5332 * spec so that system software and tools can detect endpoint devices that
5333 * support Intel Scalable I/O Virtualization without a host driver dependency.
5335 * Returns the offset of the matching extended capability structure within
5336 * the device's PCI configuration space, or 0 if the device does not support it.
5339 static int siov_find_pci_dvsec(struct pci_dev *pdev)
5344 pos = pci_find_next_ext_capability(pdev, 0, 0x23);
5346 pci_read_config_word(pdev, pos + 4, &vendor);
5347 pci_read_config_word(pdev, pos + 8, &id);
5348 if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
5351 pos = pci_find_next_ext_capability(pdev, pos, 0x23);
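/*
 * Note on the loop above: 0x23 is the PCIe DVSEC extended capability ID;
 * per the DVSEC layout, the 16-bit reads at pos + 4 and pos + 8 return the
 * DVSEC vendor ID and DVSEC ID, and DVSEC ID 5 under the Intel vendor ID
 * identifies the Scalable IOV capability described in the spec above.
 */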
5358 intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
5360 struct device_domain_info *info = get_domain_info(dev);
5362 if (feat == IOMMU_DEV_FEAT_AUX) {
5365 if (!dev_is_pci(dev) || dmar_disabled ||
5366 !scalable_mode_support() || !pasid_mode_support())
5369 ret = pci_pasid_features(to_pci_dev(dev));
5373 return !!siov_find_pci_dvsec(to_pci_dev(dev));
5376 if (feat == IOMMU_DEV_FEAT_IOPF)
5377 return info && info->pri_supported;
5379 if (feat == IOMMU_DEV_FEAT_SVA)
5380 return info && (info->iommu->flags & VTD_FLAG_SVM_CAPABLE) &&
5381 info->pasid_supported && info->pri_supported &&
5382 info->ats_supported;
5388 intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
5390 if (feat == IOMMU_DEV_FEAT_AUX)
5391 return intel_iommu_enable_auxd(dev);
5393 if (feat == IOMMU_DEV_FEAT_IOPF)
5394 return intel_iommu_dev_has_feat(dev, feat) ? 0 : -ENODEV;
5396 if (feat == IOMMU_DEV_FEAT_SVA) {
5397 struct device_domain_info *info = get_domain_info(dev);
5402 if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
5405 if (info->iommu->flags & VTD_FLAG_SVM_CAPABLE)
5413 intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
5415 if (feat == IOMMU_DEV_FEAT_AUX)
5416 return intel_iommu_disable_auxd(dev);
5422 intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
5424 struct device_domain_info *info = get_domain_info(dev);
5426 if (feat == IOMMU_DEV_FEAT_AUX)
5427 return scalable_mode_support() && info && info->auxd_enabled;
5433 intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
5435 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5437 return dmar_domain->default_pasid > 0 ?
5438 dmar_domain->default_pasid : -EINVAL;
5441 static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
5444 return attach_deferred(dev);
5448 intel_iommu_enable_nesting(struct iommu_domain *domain)
5450 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5451 unsigned long flags;
5454 spin_lock_irqsave(&device_domain_lock, flags);
5455 if (nested_mode_support() && list_empty(&dmar_domain->devices)) {
5456 dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
5457 dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
5460 spin_unlock_irqrestore(&device_domain_lock, flags);
5466 * Check that the device does not live on an external facing PCI port that is
5467 * marked as untrusted. Such devices should not be able to apply quirks and
5468 * thus not be able to bypass the IOMMU restrictions.
5470 static bool risky_device(struct pci_dev *pdev)
5472 if (pdev->untrusted) {
5474 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
5475 pdev->vendor, pdev->device);
5476 pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
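/*
 * Flush the CPU cache lines covering the page-table entries for the pfn
 * range [clf_pfn, clf_pfn + clf_pages) after a map operation, used when
 * the IOMMU's page-table walk is not cache coherent. Flushes are batched
 * via domain_flush_cache() rather than issued per PTE.
 */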
5482 static void clflush_sync_map(struct dmar_domain *domain, unsigned long clf_pfn,
5483 unsigned long clf_pages)
5485 struct dma_pte *first_pte = NULL, *pte = NULL;
5486 unsigned long lvl_pages = 0;
5489 while (clf_pages > 0) {
5492 pte = pfn_to_dma_pte(domain, clf_pfn, &level);
5496 lvl_pages = lvl_to_nr_pages(level);
5499 if (WARN_ON(!lvl_pages || clf_pages < lvl_pages))
5502 clf_pages -= lvl_pages;
5503 clf_pfn += lvl_pages;
5506 if (!clf_pages || first_pte_in_page(pte) ||
5507 (level > 1 && clf_pages < lvl_pages)) {
5508 domain_flush_cache(domain, first_pte,
5509 (void *)pte - (void *)first_pte);
5515 static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
5516 unsigned long iova, size_t size)
5518 struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5519 unsigned long pages = aligned_nrpages(iova, size);
5520 unsigned long pfn = iova >> VTD_PAGE_SHIFT;
5521 struct intel_iommu *iommu;
5524 if (!dmar_domain->iommu_coherency)
5525 clflush_sync_map(dmar_domain, pfn, pages);
5527 for_each_domain_iommu(iommu_id, dmar_domain) {
5528 iommu = g_iommus[iommu_id];
5529 __mapping_notify_one(iommu, dmar_domain, pfn, pages);
5533 const struct iommu_ops intel_iommu_ops = {
5534 .capable = intel_iommu_capable,
5535 .domain_alloc = intel_iommu_domain_alloc,
5536 .domain_free = intel_iommu_domain_free,
5537 .enable_nesting = intel_iommu_enable_nesting,
5538 .attach_dev = intel_iommu_attach_device,
5539 .detach_dev = intel_iommu_detach_device,
5540 .aux_attach_dev = intel_iommu_aux_attach_device,
5541 .aux_detach_dev = intel_iommu_aux_detach_device,
5542 .aux_get_pasid = intel_iommu_aux_get_pasid,
5543 .map = intel_iommu_map,
5544 .iotlb_sync_map = intel_iommu_iotlb_sync_map,
5545 .unmap = intel_iommu_unmap,
5546 .flush_iotlb_all = intel_flush_iotlb_all,
5547 .iotlb_sync = intel_iommu_tlb_sync,
5548 .iova_to_phys = intel_iommu_iova_to_phys,
5549 .probe_device = intel_iommu_probe_device,
5550 .probe_finalize = intel_iommu_probe_finalize,
5551 .release_device = intel_iommu_release_device,
5552 .get_resv_regions = intel_iommu_get_resv_regions,
5553 .put_resv_regions = generic_iommu_put_resv_regions,
5554 .device_group = intel_iommu_device_group,
5555 .dev_has_feat = intel_iommu_dev_has_feat,
5556 .dev_feat_enabled = intel_iommu_dev_feat_enabled,
5557 .dev_enable_feat = intel_iommu_dev_enable_feat,
5558 .dev_disable_feat = intel_iommu_dev_disable_feat,
5559 .is_attach_deferred = intel_iommu_is_attach_deferred,
5560 .def_domain_type = device_def_domain_type,
5561 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
5562 #ifdef CONFIG_INTEL_IOMMU_SVM
5563 .cache_invalidate = intel_iommu_sva_invalidate,
5564 .sva_bind_gpasid = intel_svm_bind_gpasid,
5565 .sva_unbind_gpasid = intel_svm_unbind_gpasid,
5566 .sva_bind = intel_svm_bind,
5567 .sva_unbind = intel_svm_unbind,
5568 .sva_get_pasid = intel_svm_get_pasid,
5569 .page_response = intel_svm_page_response,
5573 static void quirk_iommu_igfx(struct pci_dev *dev)
5575 if (risky_device(dev))
5578 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
5582 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5583 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
5584 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
5585 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
5586 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
5587 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
5588 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
5589 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
5591 /* Broadwell igfx malfunctions with dmar */
5592 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
5593 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
5594 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
5595 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
5596 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
5597 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
5598 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
5599 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
5600 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
5601 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
5602 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
5603 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
5604 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
5605 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
5606 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
5607 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
5608 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
5609 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
5610 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
5611 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
5612 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
5613 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
5614 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
5615 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
5617 static void quirk_iommu_rwbf(struct pci_dev *dev)
5619 if (risky_device(dev))
5623 * Mobile 4 Series Chipset neglects to set RWBF capability,
5624 * but needs it. Same seems to hold for the desktop versions.
5626 pci_info(dev, "Forcing write-buffer flush capability\n");
5630 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5631 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5632 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5633 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5634 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5635 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5636 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5639 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
5640 #define GGC_MEMORY_SIZE_NONE (0x0 << 8)
5641 #define GGC_MEMORY_SIZE_1M (0x1 << 8)
5642 #define GGC_MEMORY_SIZE_2M (0x3 << 8)
5643 #define GGC_MEMORY_VT_ENABLED (0x8 << 8)
5644 #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
5645 #define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
5646 #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
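/*
 * As the field names suggest, bits 11:8 of the GGC register describe how
 * much stolen memory the BIOS reserved for the graphics GTT and whether a
 * VT-d aware (shadow GTT) allocation was made; the quirk below only cares
 * whether GGC_MEMORY_VT_ENABLED is set.
 */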
5648 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5652 if (risky_device(dev))
5655 if (pci_read_config_word(dev, GGC, &ggc))
5658 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5659 pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5661 } else if (dmar_map_gfx) {
5662 /* we have to ensure the gfx device is idle before we flush */
5663 pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
5664 intel_iommu_strict = 1;
5667 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5668 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5669 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5670 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5672 static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
5676 if (!IS_GFX_DEVICE(dev))
5679 ver = (dev->device >> 8) & 0xff;
5680 if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
5681 ver != 0x4e && ver != 0x8a && ver != 0x98 &&
5685 if (risky_device(dev))
5688 pci_info(dev, "Skip IOMMU disabling for graphics\n");
5689 iommu_skip_te_disable = 1;
5691 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
5693 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5694 ISOCH DMAR unit for the Azalia sound device, but not give it any
5695 TLB entries, which causes it to deadlock. Check for that. We do
5696 this in a function called from init_dmars(), instead of in a PCI
5697 quirk, because we don't want to print the obnoxious "BIOS broken"
5698 message if VT-d is actually disabled.
5700 static void __init check_tylersburg_isoch(void)
5702 struct pci_dev *pdev;
5703 uint32_t vtisochctrl;
5705 /* If there's no Azalia in the system anyway, forget it. */
5706 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5710 if (risky_device(pdev)) {
5717 /* System Management Registers. Might be hidden, in which case
5718 we can't do the sanity check. But that's OK, because the
5719 known-broken BIOSes _don't_ actually hide it, so far. */
5720 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5724 if (risky_device(pdev)) {
5729 if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5736 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5737 if (vtisochctrl & 1)
5740 /* Drop all bits other than the number of TLB entries */
5741 vtisochctrl &= 0x1c;
5743 /* If we have the recommended number of TLB entries (16), fine. */
5744 if (vtisochctrl == 0x10)
5747 /* Zero TLB entries? You get to ride the short bus to school. */
5749 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5750 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5751 dmi_get_system_info(DMI_BIOS_VENDOR),
5752 dmi_get_system_info(DMI_BIOS_VERSION),
5753 dmi_get_system_info(DMI_PRODUCT_VERSION));
5754 iommu_identity_mapping |= IDENTMAP_AZALIA;
5758 pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",