Note that using this option lowers the security
provided by tboot because it makes the system
vulnerable to DMA attacks.
++++++++ + nobounce [Default off]
++++++++ + Disable the bounce buffer for untrusted devices such as
++++++++ + Thunderbolt devices. This treats untrusted devices as
++++++++ + trusted ones, and hence may expose the system to the
++++++++ + security risks of DMA attacks.
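A usage sketch, assuming (as the strncmp(str, "nobounce", 8) branch in the
intel-iommu.c hunk further down suggests) that nobounce is accepted as a
sub-option of intel_iommu= on the kernel command line:

        intel_iommu=nobounce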
intel_idle.max_cstate= [KNL,HW,ACPI,X86]
0 disables intel_idle and falls back on acpi_idle.
synchronously.
iommu.passthrough=
--------- [ARM64] Configure DMA to bypass the IOMMU by default.
+++++++++ [ARM64, X86] Configure DMA to bypass the IOMMU by default.
Format: { "0" | "1" }
0 - Use IOMMU translation for DMA.
1 - Bypass the IOMMU for DMA.
expose users to several CPU vulnerabilities.
Equivalent to: nopti [X86,PPC]
kpti=0 [ARM64]
- - - nospectre_v1 [PPC]
+ + + nospectre_v1 [X86,PPC]
nobp=0 [S390]
nospectre_v2 [X86,PPC,S390,ARM64]
spectre_v2_user=off [X86]
nosmt=force: Force disable SMT, cannot be undone
via the sysfs control file.
- - - nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
- - - check bypass). With this option data leaks are possible
- - - in the system.
+ + + nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1
+ + + (bounds check bypass). With this option data leaks are
+ + + possible in the system.
nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
the Spectre variant 2 (indirect branch prediction)
Run specified binary instead of /init from the ramdisk,
used for early userspace startup. See initrd.
+++ +++ rdrand= [X86]
+++ +++ force - Override the decision by the kernel to hide the
+++ +++ advertisement of RDRAND support (this affects
+++ +++ certain AMD processors because of buggy BIOS
+++ +++ support, specifically around the suspend/resume
+++ +++ path).
+++ +++
rdt= [HW,X86,RDT]
Turn on/off individual RDT features. List is:
cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
S: Maintained
--- ---F: drivers/net/ethernet/realtek/r8169.c
+++ +++F: drivers/net/ethernet/realtek/r8169*
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
F: drivers/crypto/sunxi-ss/
ALLWINNER VPU DRIVER
S: Maintained
S: Maintained
--- -----F: drivers/iommu/arm-smmu.c
--- -----F: drivers/iommu/arm-smmu-v3.c
+++ +++++F: drivers/iommu/arm-smmu*
F: drivers/iommu/io-pgtable-arm.c
F: drivers/iommu/io-pgtable-arm-v7s.c
F: drivers/clk/sunxi/
ARM/Allwinner sunXi SoC support
S: Maintained
F: fs/cachefiles/
CADENCE MIPI-CSI2 BRIDGES
S: Maintained
F: Documentation/devicetree/bindings/media/cdns,*.txt
DRM DRIVERS AND MISC GPU PATCHES
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
S: Maintained
F: include/linux/vga*
DRM DRIVERS FOR ALLWINNER A10
S: Supported
F: drivers/gpu/drm/sun4i/
S: Maintained
--- ---F: Documentation/ABI/testing/sysfs-bus-mdio
+++ +++F: Documentation/ABI/testing/sysfs-class-net-phydev
F: Documentation/devicetree/bindings/net/ethernet-phy.yaml
F: Documentation/devicetree/bindings/net/mdio*
F: Documentation/networking/phy.rst
S: Maintained
- - -T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
+ + +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
Q: http://patchwork.kernel.org/project/linux-fpga/list/
F: Documentation/fpga/
F: Documentation/driver-api/fpga/
- - -T: git git://github.com/bzolnier/linux.git
+ + +T: git git://anongit.freedesktop.org/drm/drm-misc
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
S: Maintained
F: Documentation/fb/
F: drivers/perf/fsl_imx8_ddr_perf.c
F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
++ + +FREESCALE IMX I2C DRIVER
++ + +S: Maintained
++ + +F: drivers/i2c/busses/i2c-imx.c
++ + +F: Documentation/devicetree/bindings/i2c/i2c-imx.txt
++ + +
FREESCALE IMX LPI2C DRIVER
F: fs/gfs2/
F: include/uapi/linux/gfs2_ondisk.h
- - -GIGASET ISDN DRIVERS
- - -W: http://gigaset307x.sourceforge.net/
- - -S: Odd Fixes
- - -F: drivers/staging/isdn/gigaset/
- - -
GNSS SUBSYSTEM
T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
F: drivers/scsi/storvsc_drv.c
F: drivers/uio/uio_hv_generic.c
F: drivers/video/fbdev/hyperv_fb.c
-- - -F: drivers/iommu/hyperv_iommu.c
++ + +F: drivers/iommu/hyperv-iommu.c
F: net/vmw_vsock/hyperv_transport.c
F: include/clocksource/hyperv_timer.h
F: include/linux/hyperv.h
S: Maintained
----- ---F: Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+++++ +++F: Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
F: drivers/i2c/busses/i2c-mv64xxx.c
I2C OVER PARALLEL PORT
F: drivers/video/fbdev/i810/
INTEL ASoC DRIVERS
S: Supported
F: drivers/scsi/isci/
++ + +INTEL CPU family model numbers
++ + +S: Supported
++ + +F: arch/x86/include/asm/intel-family.h
++ + +
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
S: Supported
-- - -F: fs/iomap.c
F: fs/iomap/
F: include/linux/iomap.h
F: fs/io_uring.c
F: include/uapi/linux/io_uring.h
----- ---IP MASQUERADING
----- ---S: Maintained
----- ---F: net/ipv4/netfilter/ipt_MASQUERADE.c
----- ---
IPMI SUBSYSTEM
F: tools/kvm/
F: tools/testing/selftests/kvm/
--- ---KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
--- ---W: http://www.linux-kvm.org/
--- ---S: Maintained
--- ---F: arch/x86/include/asm/svm.h
--- ---F: arch/x86/kvm/svm.c
--- ---
KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
--- ---L: linux-s390@vger.kernel.org
+++ +++L: kvm@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
S: Supported
KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
W: http://www.linux-kvm.org
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
F: arch/x86/kvm/
F: arch/x86/kvm/*/
F: arch/x86/include/uapi/asm/kvm*
+++ +++F: arch/x86/include/uapi/asm/vmx.h
+++ +++F: arch/x86/include/uapi/asm/svm.h
F: arch/x86/include/asm/kvm*
F: arch/x86/include/asm/pvclock-abi.h
+++ +++F: arch/x86/include/asm/svm.h
+++ +++F: arch/x86/include/asm/vmx.h
F: arch/x86/kernel/kvm.c
F: arch/x86/kernel/kvmclock.c
F: include/linux/libnvdimm.h
F: include/uapi/linux/ndctl.h
+++++ +++LICENSES and SPDX stuff
+++++ +++S: Maintained
+++++ +++T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git
+++++ +++F: COPYING
+++++ +++F: Documentation/process/license-rules.rst
+++++ +++F: LICENSES/
+++++ +++F: scripts/spdxcheck-test.sh
+++++ +++F: scripts/spdxcheck.py
+++++ +++
LIGHTNVM PLATFORM SUPPORT
W: http://github/OpenChannelSSD
S: Supported
----- ---F: driver/net/net_failover.c
+++++ +++F: drivers/net/net_failover.c
F: include/net/net_failover.h
F: Documentation/networking/net_failover.rst
S: Maintained
W: https://fedorahosted.org/dropwatch/
F: net/core/drop_monitor.c
+ + +F: include/uapi/linux/net_dropmon.h
NETWORKING DRIVERS
S: Maintained
F: net/tls/*
F: drivers/net/phy/sfp*
F: include/linux/phylink.h
F: include/linux/sfp.h
+++++ +++K: phylink
SGI GRU DRIVER
F: include/uapi/linux/arm_sdei.h
SOFTWARE RAID (Multiple Disks) SUPPORT
--- ---M: Shaohua Li <shli@kernel.org>
+++ +++M: Song Liu <song@kernel.org>
--- ---T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
+++ +++T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
S: Supported
F: drivers/md/Makefile
F: drivers/md/Kconfig
F: drivers/net/ethernet/ti/netcp*
TI PCM3060 ASoC CODEC DRIVER
- - -M: Kirill Marinushkin <kmarinushkin@birdec.tech>
+ + +M: Kirill Marinushkin <kmarinushkin@birdec.com>
S: Maintained
F: Documentation/devicetree/bindings/sound/pcm3060.txt
S: Supported
F: net/core/xdp.c
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
------- --obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+++++++ ++obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
--- -----obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+++ +++++obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
++++++++ +obj-$(CONFIG_INTEL_IOMMU) += intel-trace.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
* invalid address), we ignore the capability for the device so
* it'll be forced to go into translation mode.
*/
--------- if ((iommu_pass_through || !amd_iommu_force_isolation) &&
+++++++++ if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
struct amd_iommu *iommu;
iommu_completion_wait(iommu);
}
+++++++++static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+++++++++{
+++++++++ struct iommu_cmd cmd;
+++++++++
+++++++++ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+++++++++ dom_id, 1);
+++++++++ iommu_queue_command(iommu, &cmd);
+++++++++
+++++++++ iommu_completion_wait(iommu);
+++++++++}
+++++++++
static void amd_iommu_flush_all(struct amd_iommu *iommu)
{
struct iommu_cmd cmd;
* another level increases the size of the address space by 9 bits to a size up
* to 64 bits.
*/
---------static bool increase_address_space(struct protection_domain *domain,
+++++++++static void increase_address_space(struct protection_domain *domain,
gfp_t gfp)
{
+++++++++ unsigned long flags;
u64 *pte;
--------- if (domain->mode == PAGE_MODE_6_LEVEL)
+++++++++ spin_lock_irqsave(&domain->lock, flags);
+++++++++
+++++++++ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
/* address space already 64 bit large */
--------- return false;
+++++++++ goto out;
pte = (void *)get_zeroed_page(gfp);
if (!pte)
--------- return false;
+++++++++ goto out;
*pte = PM_LEVEL_PDE(domain->mode,
iommu_virt_to_phys(domain->pt_root));
domain->mode += 1;
domain->updated = true;
--------- return true;
+++++++++out:
+++++++++ spin_unlock_irqrestore(&domain->lock, flags);
+++++++++
+++++++++ return;
}
static u64 *alloc_pte(struct protection_domain *domain,
{
u64 pte_root = 0;
u64 flags = 0;
+++++++++ u32 old_domid;
if (domain->mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->pt_root);
flags &= ~DEV_DOMID_MASK;
flags |= domain->id;
+++++++++ old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
amd_iommu_dev_table[devid].data[1] = flags;
amd_iommu_dev_table[devid].data[0] = pte_root;
+++++++++
+++++++++ /*
+++++++++ * A kdump kernel might be replacing a domain ID that was copied from
+++++++++ * the previous kernel--if so, it needs to flush the translation cache
+++++++++ * entries for the old domain ID that is being overwritten
+++++++++ */
+++++++++ if (old_domid) {
+++++++++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+++++++++
+++++++++ amd_iommu_flush_tlb_domid(iommu, old_domid);
+++++++++ }
}
static void clear_dte_entry(u16 devid)
BUG_ON(!dev_data);
--------- if (iommu_pass_through || dev_data->iommu_v2)
+++++++++ if (dev_data->iommu_v2)
iommu_request_dm_for_dev(dev);
/* Domains are initialized for this device - have a look what we ended up with */
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
------- -- ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+++++++ ++ ret = iommu_map_page(domain, bus_addr, phys_addr,
+++++++ ++ PAGE_SIZE, prot,
+++++++ ++ GFP_ATOMIC | __GFP_NOWARN);
if (ret)
goto out_unmap;
int __init amd_iommu_init_dma_ops(void)
{
--------- swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
+++++++++ swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
iommu_detected = 1;
if (amd_iommu_unmap_flush)
}
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
--- ---- size_t page_size)
+++ ++++ size_t page_size,
+++ ++++ struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
size_t unmap_size;
domain_flush_complete(dom);
}
--- ---- static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
--- ---- unsigned long iova, size_t size)
+++ ++++ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+++ ++++ struct iommu_iotlb_gather *gather)
{
+++ ++++ amd_iommu_flush_iotlb_all(domain);
}
const struct iommu_ops amd_iommu_ops = {
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
--- ---- .iotlb_range_add = amd_iommu_iotlb_range_add,
--- ---- .iotlb_sync = amd_iommu_flush_iotlb_all,
+++ ++++ .iotlb_sync = amd_iommu_iotlb_sync,
};
/*****************************************************************************
.deactivate = irq_remapping_deactivate,
};
+++++++ ++int amd_iommu_activate_guest_mode(void *data)
+++++++ ++{
+++++++ ++ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+++++++ ++ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+++++++ ++
+++++++ ++ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+++++++ ++ !entry || entry->lo.fields_vapic.guest_mode)
+++++++ ++ return 0;
+++++++ ++
+++++++ ++ entry->lo.val = 0;
+++++++ ++ entry->hi.val = 0;
+++++++ ++
+++++++ ++ entry->lo.fields_vapic.guest_mode = 1;
+++++++ ++ entry->lo.fields_vapic.ga_log_intr = 1;
+++++++ ++ entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
+++++++ ++ entry->hi.fields.vector = ir_data->ga_vector;
+++++++ ++ entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
+++++++ ++
+++++++ ++ return modify_irte_ga(ir_data->irq_2_irte.devid,
+++++++ ++ ir_data->irq_2_irte.index, entry, NULL);
+++++++ ++}
+++++++ ++EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
+++++++ ++
+++++++ ++int amd_iommu_deactivate_guest_mode(void *data)
+++++++ ++{
+++++++ ++ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+++++++ ++ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+++++++ ++ struct irq_cfg *cfg = ir_data->cfg;
+++++++ ++
+++++++ ++ if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+++++++ ++ !entry || !entry->lo.fields_vapic.guest_mode)
+++++++ ++ return 0;
+++++++ ++
+++++++ ++ entry->lo.val = 0;
+++++++ ++ entry->hi.val = 0;
+++++++ ++
+++++++ ++ entry->lo.fields_remap.dm = apic->irq_dest_mode;
+++++++ ++ entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
+++++++ ++ entry->hi.fields.vector = cfg->vector;
+++++++ ++ entry->lo.fields_remap.destination =
+++++++ ++ APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+++++++ ++ entry->hi.fields.destination =
+++++++ ++ APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
+++++++ ++
+++++++ ++ return modify_irte_ga(ir_data->irq_2_irte.devid,
+++++++ ++ ir_data->irq_2_irte.index, entry, NULL);
+++++++ ++}
+++++++ ++EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
+++++++ ++
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
+++++++ ++ int ret;
struct amd_iommu *iommu;
struct amd_iommu_pi_data *pi_data = vcpu_info;
struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
struct amd_ir_data *ir_data = data->chip_data;
------- -- struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
if (!dev_data || !dev_data->use_vapic)
return 0;
+++++++ ++ ir_data->cfg = irqd_cfg(data);
pi_data->ir_data = ir_data;
/* Note:
pi_data->prev_ga_tag = ir_data->cached_ga_tag;
if (pi_data->is_guest_mode) {
------- -- /* Setting */
------- -- irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
------- -- irte->hi.fields.vector = vcpu_pi_info->vector;
------- -- irte->lo.fields_vapic.ga_log_intr = 1;
------- -- irte->lo.fields_vapic.guest_mode = 1;
------- -- irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
------- --
------- -- ir_data->cached_ga_tag = pi_data->ga_tag;
+++++++ ++ ir_data->ga_root_ptr = (pi_data->base >> 12);
+++++++ ++ ir_data->ga_vector = vcpu_pi_info->vector;
+++++++ ++ ir_data->ga_tag = pi_data->ga_tag;
+++++++ ++ ret = amd_iommu_activate_guest_mode(ir_data);
+++++++ ++ if (!ret)
+++++++ ++ ir_data->cached_ga_tag = pi_data->ga_tag;
} else {
------- -- /* Un-Setting */
------- -- struct irq_cfg *cfg = irqd_cfg(data);
------- --
------- -- irte->hi.val = 0;
------- -- irte->lo.val = 0;
------- -- irte->hi.fields.vector = cfg->vector;
------- -- irte->lo.fields_remap.guest_mode = 0;
------- -- irte->lo.fields_remap.destination =
------- -- APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
------- -- irte->hi.fields.destination =
------- -- APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
------- -- irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
------- -- irte->lo.fields_remap.dm = apic->irq_dest_mode;
+++++++ ++ ret = amd_iommu_deactivate_guest_mode(ir_data);
/*
* This communicates the ga_tag back to the caller
* so that it can do all the necessary clean up.
*/
------- -- ir_data->cached_ga_tag = 0;
+++++++ ++ if (!ret)
+++++++ ++ ir_data->cached_ga_tag = 0;
}
------- -- return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
+++++++ ++ return ret;
}
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
--------- struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn;
+++++++++ struct iova_domain *iovad;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
+++++++++ iovad = &cookie->iovad;
+++++++++
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, dma_addr);
+++ ++++ struct iommu_iotlb_gather iotlb_gather;
+++ ++++ size_t unmapped;
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
+++ ++++ iommu_iotlb_gather_init(&iotlb_gather);
+++ ++++
+++ ++++ unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
+++ ++++ WARN_ON(unmapped != size);
--- ---- WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
if (!cookie->fq_domain)
--- ---- iommu_tlb_sync(domain);
+++ ++++ iommu_tlb_sync(domain, &iotlb_gather);
iommu_dma_free_iova(cookie, dma_addr, size);
}
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
-- - - size_t iova_off = 0;
++ + + struct iova_domain *iovad = &cookie->iovad;
++ + + size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
-- - - if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
-- - - iova_off = iova_offset(&cookie->iovad, phys);
-- - - size = iova_align(&cookie->iovad, size + iova_off);
-- - - }
++ + + size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
struct iova_domain *iovad = &cookie->iovad;
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-- - - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
++ + + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
* - and wouldn't make the resulting output segment too long
*/
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
-- - - (cur_len + s_length <= max_len)) {
++ + + (max_len - cur_len >= s_length)) {
/* ...then concatenate it with the previous one */
cur_len += s_length;
} else {
{
bool coherent = dev_is_dma_coherent(dev);
size_t alloc_size = PAGE_ALIGN(size);
+++ +++ int node = dev_to_node(dev);
struct page *page = NULL;
void *cpu_addr;
page = dma_alloc_contiguous(dev, alloc_size, gfp);
+++ +++ if (!page)
+++ +++ page = alloc_pages_node(node, gfp, get_order(alloc_size));
if (!page)
return NULL;
if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
-- - - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
++ + + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
VM_USERMAP, prot, __builtin_return_address(0));
unsigned long pfn, off = vma->vm_pgoff;
int ret;
-- - - vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
++ + + vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
if (!msi_page)
return NULL;
-- - - iova = __iommu_dma_map(dev, msi_addr, size, prot);
-- - - if (iova == DMA_MAPPING_ERROR)
++ + + iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
++ + + if (!iova)
goto out_free_page;
++ + + if (iommu_map(domain, iova, msi_addr, size, prot))
++ + + goto out_free_iova;
++ + +
INIT_LIST_HEAD(&msi_page->list);
msi_page->phys = msi_addr;
msi_page->iova = iova;
list_add(&msi_page->list, &cookie->msi_page_list);
return msi_page;
++ + +out_free_iova:
++ + + iommu_dma_free_iova(cookie, iova, size);
out_free_page:
kfree(msi_page);
return NULL;
static const struct iommu_ops exynos_iommu_ops;
-- -------static int __init exynos_sysmmu_probe(struct platform_device *pdev)
++ +++++++static int exynos_sysmmu_probe(struct platform_device *pdev)
{
int irq, ret;
struct device *dev = &pdev->dev;
return PTR_ERR(data->sfrbase);
irq = platform_get_irq(pdev, 0);
--------- if (irq <= 0) {
--------- dev_err(dev, "Unable to find IRQ resource\n");
+++++++++ if (irq <= 0)
return irq;
--------- }
ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
dev_name(dev), data);
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
--- ---- unsigned long l_iova, size_t size)
+++ ++++ unsigned long l_iova, size_t size,
+++ ++++ struct iommu_iotlb_gather *gather)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
#include <linux/dma-direct.h>
#include <linux/crash_dump.h>
#include <linux/numa.h>
++++++++ +#include <linux/swiotlb.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
++++++++ +#include <trace/events/intel_iommu.h>
#include "irq_remapping.h"
#include "intel-pasid.h"
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+++++++++static void domain_context_clear(struct intel_iommu *iommu,
+++++++++ struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu);
static bool device_is_rmrr_locked(struct device *dev);
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
++++++++ +static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
++++++++ + dma_addr_t iova);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
++++++++ +static int intel_no_bounce;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
++++++++ +#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \
++++++++ + to_pci_dev(d)->untrusted)
++++++++ +
/*
* Iterate over elements in device_domain_list and call the specified
* callback @fn against each element.
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
++++++++ + } else if (!strncmp(str, "nobounce", 8)) {
++++++++ + pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
++++++++ + intel_no_bounce = 1;
}
str += strcspn(str, ",");
return ret;
}
+++++++++struct domain_context_mapping_data {
+++++++++ struct dmar_domain *domain;
+++++++++ struct intel_iommu *iommu;
+++++++++ struct pasid_table *table;
+++++++++};
+++++++++
+++++++++static int domain_context_mapping_cb(struct pci_dev *pdev,
+++++++++ u16 alias, void *opaque)
+++++++++{
+++++++++ struct domain_context_mapping_data *data = opaque;
+++++++++
+++++++++ return domain_context_mapping_one(data->domain, data->iommu,
+++++++++ data->table, PCI_BUS_NUM(alias),
+++++++++ alias & 0xff);
+++++++++}
+++++++++
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
+++++++++ struct domain_context_mapping_data data;
struct pasid_table *table;
struct intel_iommu *iommu;
u8 bus, devfn;
return -ENODEV;
table = intel_pasid_get_table(dev);
--------- return domain_context_mapping_one(domain, iommu, table, bus, devfn);
+++++++++
+++++++++ if (!dev_is_pci(dev))
+++++++++ return domain_context_mapping_one(domain, iommu, table,
+++++++++ bus, devfn);
+++++++++
+++++++++ data.domain = domain;
+++++++++ data.iommu = iommu;
+++++++++ data.table = table;
+++++++++
+++++++++ return pci_for_each_dma_alias(to_pci_dev(dev),
+++++++++ &domain_context_mapping_cb, &data);
}
static int domain_context_mapped_cb(struct pci_dev *pdev,
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
--------- if (iommu_pass_through)
+++++++++ if (iommu_default_passthrough())
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
dmar_domain = to_dmar_domain(domain);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
}
++ + + dmar_remove_one_dev_info(dev);
get_private_domain_for_dev(dev);
}
start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
++++++++ +
++++++++ + trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
++++++++ +
return start_paddr;
error:
if (dev_is_pci(dev))
pdev = to_pci_dev(dev);
-------- - dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
-------- -
freelist = domain_unmap(domain, start_pfn, last_pfn);
-------- -
if (intel_iommu_strict || (pdev && pdev->untrusted) ||
!has_iova_flush_queue(&domain->iovad)) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
* cpu used up by the iotlb flush operation...
*/
}
++++++++ +
++++++++ + trace_unmap_single(dev, dev_addr, size);
}
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
}
intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
++++++++ +
++++++++ + trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
return 0;
}
++++++++ + trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
++++++++ + sg_phys(sglist), size << VTD_PAGE_SHIFT);
++++++++ +
return nelems;
}
.dma_supported = dma_direct_supported,
};
++++++++ +static void
++++++++ +bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
++++++++ + enum dma_data_direction dir, enum dma_sync_target target)
++++++++ +{
++++++++ + struct dmar_domain *domain;
++++++++ + phys_addr_t tlb_addr;
++++++++ +
++++++++ + domain = find_domain(dev);
++++++++ + if (WARN_ON(!domain))
++++++++ + return;
++++++++ +
++++++++ + tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
++++++++ + if (is_swiotlb_buffer(tlb_addr))
++++++++ + swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
++++++++ +}
++++++++ +
++++++++ +static dma_addr_t
++++++++ +bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
++++++++ + enum dma_data_direction dir, unsigned long attrs,
++++++++ + u64 dma_mask)
++++++++ +{
++++++++ + size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
++++++++ + struct dmar_domain *domain;
++++++++ + struct intel_iommu *iommu;
++++++++ + unsigned long iova_pfn;
++++++++ + unsigned long nrpages;
++++++++ + phys_addr_t tlb_addr;
++++++++ + int prot = 0;
++++++++ + int ret;
++++++++ +
++++++++ + domain = find_domain(dev);
++++++++ + if (WARN_ON(dir == DMA_NONE || !domain))
++++++++ + return DMA_MAPPING_ERROR;
++++++++ +
++++++++ + iommu = domain_get_iommu(domain);
++++++++ + if (WARN_ON(!iommu))
++++++++ + return DMA_MAPPING_ERROR;
++++++++ +
++++++++ + nrpages = aligned_nrpages(0, size);
++++++++ + iova_pfn = intel_alloc_iova(dev, domain,
++++++++ + dma_to_mm_pfn(nrpages), dma_mask);
++++++++ + if (!iova_pfn)
++++++++ + return DMA_MAPPING_ERROR;
++++++++ +
++++++++ + /*
++++++++ + * Check if DMAR supports zero-length reads on write only
++++++++ + * mappings.
++++++++ + */
++++++++ + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
++++++++ + !cap_zlr(iommu->cap))
++++++++ + prot |= DMA_PTE_READ;
++++++++ + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
++++++++ + prot |= DMA_PTE_WRITE;
++++++++ +
++++++++ + /*
++++++++ + * If both the physical buffer start address and size are
++++++++ + * page aligned, we don't need to use a bounce page.
++++++++ + */
++++++++ + if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
++++++++ + tlb_addr = swiotlb_tbl_map_single(dev,
++++++++ + __phys_to_dma(dev, io_tlb_start),
++++++++ + paddr, size, aligned_size, dir, attrs);
++++++++ + if (tlb_addr == DMA_MAPPING_ERROR) {
++++++++ + goto swiotlb_error;
++++++++ + } else {
++++++++ + /* Cleanup the padding area. */
++++++++ + void *padding_start = phys_to_virt(tlb_addr);
++++++++ + size_t padding_size = aligned_size;
++++++++ +
++++++++ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
++++++++ + (dir == DMA_TO_DEVICE ||
++++++++ + dir == DMA_BIDIRECTIONAL)) {
++++++++ + padding_start += size;
++++++++ + padding_size -= size;
++++++++ + }
++++++++ +
++++++++ + memset(padding_start, 0, padding_size);
++++++++ + }
++++++++ + } else {
++++++++ + tlb_addr = paddr;
++++++++ + }
++++++++ +
++++++++ + ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
++++++++ + tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
++++++++ + if (ret)
++++++++ + goto mapping_error;
++++++++ +
++++++++ + trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
++++++++ +
++++++++ + return (phys_addr_t)iova_pfn << PAGE_SHIFT;
++++++++ +
++++++++ +mapping_error:
++++++++ + if (is_swiotlb_buffer(tlb_addr))
++++++++ + swiotlb_tbl_unmap_single(dev, tlb_addr, size,
++++++++ + aligned_size, dir, attrs);
++++++++ +swiotlb_error:
++++++++ + free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
++++++++ + dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
++++++++ + size, (unsigned long long)paddr, dir);
++++++++ +
++++++++ + return DMA_MAPPING_ERROR;
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
++++++++ + enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ + size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
++++++++ + struct dmar_domain *domain;
++++++++ + phys_addr_t tlb_addr;
++++++++ +
++++++++ + domain = find_domain(dev);
++++++++ + if (WARN_ON(!domain))
++++++++ + return;
++++++++ +
++++++++ + tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
++++++++ + if (WARN_ON(!tlb_addr))
++++++++ + return;
++++++++ +
++++++++ + intel_unmap(dev, dev_addr, size);
++++++++ + if (is_swiotlb_buffer(tlb_addr))
++++++++ + swiotlb_tbl_unmap_single(dev, tlb_addr, size,
++++++++ + aligned_size, dir, attrs);
++++++++ +
++++++++ + trace_bounce_unmap_single(dev, dev_addr, size);
++++++++ +}
++++++++ +
++++++++ +static dma_addr_t
++++++++ +bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
++++++++ + size_t size, enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ + return bounce_map_single(dev, page_to_phys(page) + offset,
++++++++ + size, dir, attrs, *dev->dma_mask);
++++++++ +}
++++++++ +
++++++++ +static dma_addr_t
++++++++ +bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
++++++++ + enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ + return bounce_map_single(dev, phys_addr, size,
++++++++ + dir, attrs, *dev->dma_mask);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
++++++++ + enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ + bounce_unmap_single(dev, dev_addr, size, dir, attrs);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
++++++++ + enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ + bounce_unmap_single(dev, dev_addr, size, dir, attrs);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
++++++++ + enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ + struct scatterlist *sg;
++++++++ + int i;
++++++++ +
++++++++ + for_each_sg(sglist, sg, nelems, i)
++++++++ + bounce_unmap_page(dev, sg->dma_address,
++++++++ + sg_dma_len(sg), dir, attrs);
++++++++ +}
++++++++ +
++++++++ +static int
++++++++ +bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
++++++++ + enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ + int i;
++++++++ + struct scatterlist *sg;
++++++++ +
++++++++ + for_each_sg(sglist, sg, nelems, i) {
++++++++ + sg->dma_address = bounce_map_page(dev, sg_page(sg),
++++++++ + sg->offset, sg->length,
++++++++ + dir, attrs);
++++++++ + if (sg->dma_address == DMA_MAPPING_ERROR)
++++++++ + goto out_unmap;
++++++++ + sg_dma_len(sg) = sg->length;
++++++++ + }
++++++++ +
++++++++ + return nelems;
++++++++ +
++++++++ +out_unmap:
++++++++ + bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
++++++++ + return 0;
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
++++++++ + size_t size, enum dma_data_direction dir)
++++++++ +{
++++++++ + bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
++++++++ + size_t size, enum dma_data_direction dir)
++++++++ +{
++++++++ + bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
++++++++ + int nelems, enum dma_data_direction dir)
++++++++ +{
++++++++ + struct scatterlist *sg;
++++++++ + int i;
++++++++ +
++++++++ + for_each_sg(sglist, sg, nelems, i)
++++++++ + bounce_sync_single(dev, sg_dma_address(sg),
++++++++ + sg_dma_len(sg), dir, SYNC_FOR_CPU);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
++++++++ + int nelems, enum dma_data_direction dir)
++++++++ +{
++++++++ + struct scatterlist *sg;
++++++++ + int i;
++++++++ +
++++++++ + for_each_sg(sglist, sg, nelems, i)
++++++++ + bounce_sync_single(dev, sg_dma_address(sg),
++++++++ + sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
++++++++ +}
++++++++ +
++++++++ +static const struct dma_map_ops bounce_dma_ops = {
++++++++ + .alloc = intel_alloc_coherent,
++++++++ + .free = intel_free_coherent,
++++++++ + .map_sg = bounce_map_sg,
++++++++ + .unmap_sg = bounce_unmap_sg,
++++++++ + .map_page = bounce_map_page,
++++++++ + .unmap_page = bounce_unmap_page,
++++++++ + .sync_single_for_cpu = bounce_sync_single_for_cpu,
++++++++ + .sync_single_for_device = bounce_sync_single_for_device,
++++++++ + .sync_sg_for_cpu = bounce_sync_sg_for_cpu,
++++++++ + .sync_sg_for_device = bounce_sync_sg_for_device,
++++++++ + .map_resource = bounce_map_resource,
++++++++ + .unmap_resource = bounce_unmap_resource,
++++++++ + .dma_supported = dma_direct_supported,
++++++++ +};
++++++++ +
static inline int iommu_domain_cache_init(void)
{
int ret = 0;
NULL,
};
-------- -static int __init platform_optin_force_iommu(void)
++++++++ +static inline bool has_untrusted_dev(void)
{
struct pci_dev *pdev = NULL;
-------- - bool has_untrusted_dev = false;
-------- - if (!dmar_platform_optin() || no_platform_optin)
-------- - return 0;
++++++++ + for_each_pci_dev(pdev)
++++++++ + if (pdev->untrusted)
++++++++ + return true;
-------- - for_each_pci_dev(pdev) {
-------- - if (pdev->untrusted) {
-------- - has_untrusted_dev = true;
-------- - break;
-------- - }
-------- - }
++++++++ + return false;
++++++++ +}
-------- - if (!has_untrusted_dev)
++++++++ +static int __init platform_optin_force_iommu(void)
++++++++ +{
++++++++ + if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev())
return 0;
if (no_iommu || dmar_disabled)
iommu_identity_mapping |= IDENTMAP_ALL;
dmar_disabled = 0;
-------- -#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-------- - swiotlb = 0;
-------- -#endif
no_iommu = 0;
return 1;
up_write(&dmar_global_lock);
#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-------- - swiotlb = 0;
++++++++ + /*
++++++++ + * If the system has no untrusted device or the user has decided
++++++++ + * to disable the bounce page mechanisms, we don't need swiotlb.
++++++++ + * Mark this, and the pre-allocated bounce pages will be released
++++++++ + * later.
++++++++ + */
++++++++ + if (!has_untrusted_dev() || intel_no_bounce)
++++++++ + swiotlb = 0;
#endif
dma_ops = &intel_dma_ops;
return ret;
}
+++++++++static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+++++++++{
+++++++++ struct intel_iommu *iommu = opaque;
+++++++++
+++++++++ domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+++++++++ return 0;
+++++++++}
+++++++++
+++++++++/*
+++++++++ * NB - intel-iommu lacks any sort of reference counting for the users of
+++++++++ * dependent devices. If multiple endpoints have intersecting dependent
+++++++++ * devices, unbinding the driver from any one of them will possibly leave
+++++++++ * the others unable to operate.
+++++++++ */
+++++++++static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+++++++++{
+++++++++ if (!iommu || !dev || !dev_is_pci(dev))
+++++++++ return;
+++++++++
+++++++++ pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+++++++++}
+++++++++
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
struct dmar_domain *domain;
PASID_RID2PASID);
iommu_disable_dev_iotlb(info);
--------- domain_context_clear_one(iommu, info->bus, info->devfn);
+++++++++ domain_context_clear(iommu, info->dev);
intel_pasid_free_table(info->dev);
}
/* free the private domain */
if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
-- - - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
++ + + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
++ + + list_empty(&domain->devices))
domain_exit(info->domain);
free_devinfo_mem(info);
spin_lock_irqsave(&device_domain_lock, flags);
info = dev->archdata.iommu;
-- - - __dmar_remove_one_dev_info(info);
++ + + if (info)
++ + + __dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
}
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
--- ---- unsigned long iova, size_t size)
+++ ++++ unsigned long iova, size_t size,
+++ ++++ struct iommu_iotlb_gather *gather)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct page *freelist = NULL;
if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
ret = iommu_request_dm_for_dev(dev);
if (ret) {
++ + + dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
domain_add_dev_info(si_domain, dev);
dev_info(dev,
if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
ret = iommu_request_dma_domain_for_dev(dev);
if (ret) {
++ + + dmar_remove_one_dev_info(dev);
dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
if (!get_private_domain_for_dev(dev)) {
dev_warn(dev,
}
}
++++++++ + if (device_needs_bounce(dev)) {
++++++++ + dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
++++++++ + set_dma_ops(dev, &bounce_dma_ops);
++++++++ + }
++++++++ +
return 0;
}
if (!iommu)
return;
++ + + dmar_remove_one_dev_info(dev);
++ + +
iommu_group_remove_device(dev);
iommu_device_unlink(&iommu->iommu, dev);
++++++++ +
++++++++ + if (device_needs_bounce(dev))
++++++++ + set_dma_ops(dev, NULL);
}
static void intel_iommu_get_resv_regions(struct device *device,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
-------- -static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
++++++++ +static void quirk_iommu_igfx(struct pci_dev *dev)
{
-------- - /* G4x/GM45 integrated gfx dmar support is totally busted. */
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
dmar_map_gfx = 0;
}
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
++++++++ +/* G4x/GM45 integrated gfx dmar support is totally busted. */
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
++++++++ +
++++++++ +/* Broadwell igfx malfunctions with dmar */
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
bool setup_imbuscr;
bool twobit_imttbcr_sl0;
bool reserved_context;
++++++ +++ bool cache_snoop;
};
struct ipmmu_vmsa_device {
#define IMTTBCR 0x0008
#define IMTTBCR_EAE (1 << 31)
#define IMTTBCR_PMB (1 << 30)
------ ---#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28)
------ ---#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28)
------ ---#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28)
------ ---#define IMTTBCR_SH1_MASK (3 << 28)
------ ---#define IMTTBCR_ORGN1_NC (0 << 26)
------ ---#define IMTTBCR_ORGN1_WB_WA (1 << 26)
------ ---#define IMTTBCR_ORGN1_WT (2 << 26)
------ ---#define IMTTBCR_ORGN1_WB (3 << 26)
------ ---#define IMTTBCR_ORGN1_MASK (3 << 26)
------ ---#define IMTTBCR_IRGN1_NC (0 << 24)
------ ---#define IMTTBCR_IRGN1_WB_WA (1 << 24)
------ ---#define IMTTBCR_IRGN1_WT (2 << 24)
------ ---#define IMTTBCR_IRGN1_WB (3 << 24)
------ ---#define IMTTBCR_IRGN1_MASK (3 << 24)
++++++ +++#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */
#define IMTTBCR_TSZ1_MASK (7 << 16)
#define IMTTBCR_TSZ1_SHIFT 16
------ ---#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12)
------ ---#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12)
------ ---#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12)
------ ---#define IMTTBCR_SH0_MASK (3 << 12)
------ ---#define IMTTBCR_ORGN0_NC (0 << 10)
------ ---#define IMTTBCR_ORGN0_WB_WA (1 << 10)
------ ---#define IMTTBCR_ORGN0_WT (2 << 10)
------ ---#define IMTTBCR_ORGN0_WB (3 << 10)
------ ---#define IMTTBCR_ORGN0_MASK (3 << 10)
------ ---#define IMTTBCR_IRGN0_NC (0 << 8)
------ ---#define IMTTBCR_IRGN0_WB_WA (1 << 8)
------ ---#define IMTTBCR_IRGN0_WT (2 << 8)
------ ---#define IMTTBCR_IRGN0_WB (3 << 8)
------ ---#define IMTTBCR_IRGN0_MASK (3 << 8)
++++++ +++#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */
++++++ +++#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */
++++++ +++#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_2 (0 << 4)
#define IMTTBCR_SL0_LVL_1 (1 << 4)
#define IMTTBCR_TSZ0_MASK (7 << 0)
#define IMTTBCR_TSZ0_SHIFT 0
------ ---#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6)
------ ---#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6)
------ ---#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6)
------ ---
#define IMBUSCR 0x000c
#define IMBUSCR_DVM (1 << 2)
#define IMBUSCR_BUSSEL_SYS (0 << 0)
ipmmu_tlb_invalidate(domain);
}
--- ---- static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
--- ---- size_t granule, bool leaf, void *cookie)
+++ ++++ static void ipmmu_tlb_flush(unsigned long iova, size_t size,
+++ ++++ size_t granule, void *cookie)
{
--- ---- /* The hardware doesn't support selective TLB flush. */
+++ ++++ ipmmu_tlb_flush_all(cookie);
}
--- ---- static const struct iommu_gather_ops ipmmu_gather_ops = {
+++ ++++ static const struct iommu_flush_ops ipmmu_flush_ops = {
.tlb_flush_all = ipmmu_tlb_flush_all,
--- ---- .tlb_add_flush = ipmmu_tlb_add_flush,
--- ---- .tlb_sync = ipmmu_tlb_flush_all,
+++ ++++ .tlb_flush_walk = ipmmu_tlb_flush,
+++ ++++ .tlb_flush_leaf = ipmmu_tlb_flush,
};
/* -----------------------------------------------------------------------------
/*
* TTBCR
------ --- * We use long descriptors with inner-shareable WBWA tables and allocate
------ --- * the whole 32-bit VA space to TTBR0.
++++++ +++ * We use long descriptors and allocate the whole 32-bit VA space to
++++++ +++ * TTBR0.
*/
if (domain->mmu->features->twobit_imttbcr_sl0)
tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
else
tmp = IMTTBCR_SL0_LVL_1;
------ --- ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
------ --- IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
------ --- IMTTBCR_IRGN0_WB_WA | tmp);
++++++ +++ if (domain->mmu->features->cache_snoop)
++++++ +++ tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
++++++ +++ IMTTBCR_IRGN0_WB_WA;
++++++ +++
++++++ +++ ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
/* MAIR0 */
ipmmu_ctx_write_root(domain, IMMAIR0,
domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
domain->cfg.ias = 32;
domain->cfg.oas = 40;
--- ---- domain->cfg.tlb = &ipmmu_gather_ops;
+++ ++++ domain->cfg.tlb = &ipmmu_flush_ops;
domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
domain->io_domain.geometry.force_aperture = true;
/*
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
--- ---- size_t size)
+++ ++++ size_t size, struct iommu_iotlb_gather *gather)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
--- ---- return domain->iop->unmap(domain->iop, iova, size);
+++ ++++ return domain->iop->unmap(domain->iop, iova, size, gather);
}
--- ---- static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+++ ++++ static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
ipmmu_tlb_flush_all(domain);
}
+++ ++++ static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
+++ ++++ struct iommu_iotlb_gather *gather)
+++ ++++ {
+++ ++++ ipmmu_flush_iotlb_all(io_domain);
+++ ++++ }
+++ ++++
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
--- ---- .flush_iotlb_all = ipmmu_iotlb_sync,
+++ ++++ .flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
.setup_imbuscr = true,
.twobit_imttbcr_sl0 = false,
.reserved_context = false,
++++++ +++ .cache_snoop = true,
};
static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.setup_imbuscr = false,
.twobit_imttbcr_sl0 = true,
.reserved_context = true,
++++++ +++ .cache_snoop = false,
};
static const struct of_device_id ipmmu_of_ids[] = {
static const struct iommu_ops omap_iommu_ops;
+ ++++++++struct orphan_dev {
+ ++++++++ struct device *dev;
+ ++++++++ struct list_head node;
+ ++++++++};
+ ++++++++
+ ++++++++static LIST_HEAD(orphan_dev_list);
+ ++++++++
+ ++++++++static DEFINE_SPINLOCK(orphan_lock);
+ ++++++++
#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
/* bitmap of the page sizes currently supported */
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
+ ++++++++static int _omap_iommu_add_device(struct device *dev);
+ ++++++++
/**
* to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
* @dom: generic iommu domain handle
/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
+ ++++++++ *
+ ++++++++ * This should be treated as a deprecated API. It is preserved only
+ ++++++++ * to maintain existing functionality for the OMAP3 ISP driver.
**/
void omap_iommu_save_ctx(struct device *dev)
{
/**
* omap_iommu_restore_ctx - Restore registers for pm off-mode support
* @dev: client device
+ ++++++++ *
+ ++++++++ * This should be treated as a deprecated API. It is preserved only
+ ++++++++ * to maintain existing functionality for the OMAP3 ISP driver.
**/
void omap_iommu_restore_ctx(struct device *dev)
{
static int iommu_enable(struct omap_iommu *obj)
{
- -------- int err;
- -------- struct platform_device *pdev = to_platform_device(obj->dev);
- -------- struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
- --------
- -------- if (pdata && pdata->deassert_reset) {
- -------- err = pdata->deassert_reset(pdev, pdata->reset_name);
- -------- if (err) {
- -------- dev_err(obj->dev, "deassert_reset failed: %d\n", err);
- -------- return err;
- -------- }
- -------- }
- --------
- -------- pm_runtime_get_sync(obj->dev);
+ ++++++++ int ret;
- -------- err = omap2_iommu_enable(obj);
+ ++++++++ ret = pm_runtime_get_sync(obj->dev);
+ ++++++++ if (ret < 0)
+ ++++++++ pm_runtime_put_noidle(obj->dev);
- -------- return err;
+ ++++++++ return ret < 0 ? ret : 0;
}
static void iommu_disable(struct omap_iommu *obj)
{
- -------- struct platform_device *pdev = to_platform_device(obj->dev);
- -------- struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
- --------
- -------- omap2_iommu_disable(obj);
- --------
pm_runtime_put_sync(obj->dev);
- --------
- -------- if (pdata && pdata->assert_reset)
- -------- pdata->assert_reset(pdev, pdata->reset_name);
}
/*
dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
DMA_TO_DEVICE);
- -------- iommu_disable(obj);
obj->pd_dma = 0;
obj->iopgd = NULL;
+ ++++++++ iommu_disable(obj);
spin_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
+ ++++++++static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
+ ++++++++{
+ ++++++++ struct iotlb_lock lock;
+ ++++++++ struct cr_regs cr;
+ ++++++++ struct cr_regs *tmp;
+ ++++++++ int i;
+ ++++++++
+ ++++++++ /* check if there are any locked tlbs to save */
+ ++++++++ iotlb_lock_get(obj, &lock);
+ ++++++++ obj->num_cr_ctx = lock.base;
+ ++++++++ if (!obj->num_cr_ctx)
+ ++++++++ return;
+ ++++++++
+ ++++++++ tmp = obj->cr_ctx;
+ ++++++++ for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
+ ++++++++ *tmp++ = cr;
+ ++++++++}
+ ++++++++
+ ++++++++static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
+ ++++++++{
+ ++++++++ struct iotlb_lock l;
+ ++++++++ struct cr_regs *tmp;
+ ++++++++ int i;
+ ++++++++
+ ++++++++ /* no locked tlbs to restore */
+ ++++++++ if (!obj->num_cr_ctx)
+ ++++++++ return;
+ ++++++++
+ ++++++++ l.base = 0;
+ ++++++++ tmp = obj->cr_ctx;
+ ++++++++ for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
+ ++++++++ l.vict = i;
+ ++++++++ iotlb_lock_set(obj, &l);
+ ++++++++ iotlb_load_cr(obj, tmp);
+ ++++++++ }
+ ++++++++ l.base = obj->num_cr_ctx;
+ ++++++++ l.vict = i;
+ ++++++++ iotlb_lock_set(obj, &l);
+ ++++++++}
+ ++++++++
+ ++++++++/**
+ ++++++++ * omap_iommu_domain_deactivate - deactivate attached iommu devices
+ ++++++++ * @domain: iommu domain attached to the target iommu device
+ ++++++++ *
+ ++++++++ * This API allows the client devices of IOMMU devices to suspend
+ ++++++++ * the IOMMUs they control at runtime, after they have been idled and
+ ++++++++ * have suspended all activity. System Suspend will leverage the PM
+ ++++++++ * driver late callbacks.
+ ++++++++ **/
+ ++++++++int omap_iommu_domain_deactivate(struct iommu_domain *domain)
+ ++++++++{
+ ++++++++ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+ ++++++++ struct omap_iommu_device *iommu;
+ ++++++++ struct omap_iommu *oiommu;
+ ++++++++ int i;
+ ++++++++
+ ++++++++ if (!omap_domain->dev)
+ ++++++++ return 0;
+ ++++++++
+ ++++++++ iommu = omap_domain->iommus;
+ ++++++++ iommu += (omap_domain->num_iommus - 1);
+ ++++++++ for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
+ ++++++++ oiommu = iommu->iommu_dev;
+ ++++++++ pm_runtime_put_sync(oiommu->dev);
+ ++++++++ }
+ ++++++++
+ ++++++++ return 0;
+ ++++++++}
+ ++++++++EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
+ ++++++++
+ ++++++++/**
+ ++++++++ * omap_iommu_domain_activate - activate attached iommu devices
+ ++++++++ * @domain: iommu domain attached to the target iommu device
+ ++++++++ *
+ ++++++++ * This API allows the client devices of IOMMU devices to resume the
+ ++++++++ * IOMMUs they control at runtime, before they can resume operations.
+ ++++++++ * System Resume will leverage the PM driver late callbacks.
+ ++++++++ **/
+ ++++++++int omap_iommu_domain_activate(struct iommu_domain *domain)
+ ++++++++{
+ ++++++++ struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+ ++++++++ struct omap_iommu_device *iommu;
+ ++++++++ struct omap_iommu *oiommu;
+ ++++++++ int i;
+ ++++++++
+ ++++++++ if (!omap_domain->dev)
+ ++++++++ return 0;
+ ++++++++
+ ++++++++ iommu = omap_domain->iommus;
+ ++++++++ for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
+ ++++++++ oiommu = iommu->iommu_dev;
+ ++++++++ pm_runtime_get_sync(oiommu->dev);
+ ++++++++ }
+ ++++++++
+ ++++++++ return 0;
+ ++++++++}
+ ++++++++EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
+ ++++++++
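To illustrate the intended call flow of the two exported helpers above, here is a minimal, hypothetical client-side sketch. The client driver, its function names, and the assumption that the prototypes are available via <linux/omap-iommu.h> are illustrative, not taken from the patch; iommu_get_domain_for_dev() is the regular IOMMU core helper.

#include <linux/iommu.h>
#include <linux/omap-iommu.h>	/* assumed to declare the two helpers */

static int my_client_runtime_suspend(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/* ...idle the client device and stop all of its DMA first... */
	return omap_iommu_domain_deactivate(domain);
}

static int my_client_runtime_resume(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	int ret;

	ret = omap_iommu_domain_activate(domain);
	if (ret)
		return ret;

	/* ...now it is safe to restart the client device... */
	return 0;
}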
+ ++++++++/**
+ ++++++++ * omap_iommu_runtime_suspend - disable an iommu device
+ ++++++++ * @dev: iommu device
+ ++++++++ *
+ ++++++++ * This function performs all that is necessary to disable an
+ ++++++++ * IOMMU device, either during final detachment from a client
+ ++++++++ * device, or during system/runtime suspend of the device. This
+ ++++++++ * includes programming all the appropriate IOMMU registers, and
+ ++++++++ * managing the associated omap_hwmod's state and the device's
+ ++++++++ * reset line. This function also saves the context of any
+ ++++++++ * locked TLBs if suspending.
+ ++++++++ **/
+ ++++++++static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
+ ++++++++{
+ ++++++++ struct platform_device *pdev = to_platform_device(dev);
+ ++++++++ struct iommu_platform_data *pdata = dev_get_platdata(dev);
+ ++++++++ struct omap_iommu *obj = to_iommu(dev);
+ ++++++++ int ret;
+ ++++++++
+ ++++++++ /* save the TLBs only during suspend, and not for power down */
+ ++++++++ if (obj->domain && obj->iopgd)
+ ++++++++ omap_iommu_save_tlb_entries(obj);
+ ++++++++
+ ++++++++ omap2_iommu_disable(obj);
+ ++++++++
+ ++++++++ if (pdata && pdata->device_idle)
+ ++++++++ pdata->device_idle(pdev);
+ ++++++++
+ ++++++++ if (pdata && pdata->assert_reset)
+ ++++++++ pdata->assert_reset(pdev, pdata->reset_name);
+ ++++++++
+ ++++++++ if (pdata && pdata->set_pwrdm_constraint) {
+ ++++++++ ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
+ ++++++++ if (ret) {
+ ++++++++ dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
+ ++++++++ ret);
+ ++++++++ }
+ ++++++++ }
+ ++++++++
+ ++++++++ return 0;
+ ++++++++}
+ ++++++++
+ ++++++++/**
+ ++++++++ * omap_iommu_runtime_resume - enable an iommu device
+ ++++++++ * @dev: iommu device
+ ++++++++ *
+ ++++++++ * This function performs all that is necessary to enable an
+ ++++++++ * IOMMU device, either during initial attachment to a client
+ ++++++++ * device, or during system/runtime resume of the device. This
+ ++++++++ * includes programming all the appropriate IOMMU registers, and
+ ++++++++ * managing the associated omap_hwmod's state and the device's
+ ++++++++ * reset line. The function also restores any locked TLBs if
+ ++++++++ * resuming after a suspend.
+ ++++++++ **/
+ ++++++++static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
+ ++++++++{
+ ++++++++ struct platform_device *pdev = to_platform_device(dev);
+ ++++++++ struct iommu_platform_data *pdata = dev_get_platdata(dev);
+ ++++++++ struct omap_iommu *obj = to_iommu(dev);
+ ++++++++ int ret = 0;
+ ++++++++
+ ++++++++ if (pdata && pdata->set_pwrdm_constraint) {
+ ++++++++ ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
+ ++++++++ if (ret) {
+ ++++++++ dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
+ ++++++++ ret);
+ ++++++++ }
+ ++++++++ }
+ ++++++++
+ ++++++++ if (pdata && pdata->deassert_reset) {
+ ++++++++ ret = pdata->deassert_reset(pdev, pdata->reset_name);
+ ++++++++ if (ret) {
+ ++++++++ dev_err(dev, "deassert_reset failed: %d\n", ret);
+ ++++++++ return ret;
+ ++++++++ }
+ ++++++++ }
+ ++++++++
+ ++++++++ if (pdata && pdata->device_enable)
+ ++++++++ pdata->device_enable(pdev);
+ ++++++++
+ ++++++++ /* restore the TLBs only during resume, and not for power up */
+ ++++++++ if (obj->domain)
+ ++++++++ omap_iommu_restore_tlb_entries(obj);
+ ++++++++
+ ++++++++ ret = omap2_iommu_enable(obj);
+ ++++++++
+ ++++++++ return ret;
+ ++++++++}
+ ++++++++
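The pdata hooks called from the two runtime PM callbacks above are provided by the board code through struct iommu_platform_data. The sketch below shows a hypothetical wiring; the handler names, prototypes, and header location are assumptions (only the field names come from the code above; real platforms populate them from omap_device/omap_hwmod helpers):

#include <linux/platform_device.h>
#include <linux/platform_data/iommu-omap.h>	/* assumed location of the struct */

/* placeholder handlers, assumed to return 0 on success */
static int my_mmu_enable(struct platform_device *pdev) { return 0; }
static int my_mmu_idle(struct platform_device *pdev) { return 0; }
static int my_mmu_assert_reset(struct platform_device *pdev, const char *name) { return 0; }
static int my_mmu_deassert_reset(struct platform_device *pdev, const char *name) { return 0; }

static struct iommu_platform_data my_mmu_pdata = {
	.reset_name	= "mmu",		/* reset line name handed to the hooks */
	.device_enable	= my_mmu_enable,	/* called on runtime resume */
	.device_idle	= my_mmu_idle,		/* called on runtime suspend */
	.assert_reset	= my_mmu_assert_reset,
	.deassert_reset	= my_mmu_deassert_reset,
};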
+ ++++++++/**
+ ++++++++ * omap_iommu_prepare - prepare() dev_pm_ops implementation
+ ++++++++ * @dev: iommu device
+ ++++++++ *
+ ++++++++ * This function determines whether the IOMMU device needs suspending
+ ++++++++ * at all. It checks whether the runtime PM status of the device is
+ ++++++++ * already suspended and returns 1 in that case, which causes the PM
+ ++++++++ * core to skip invoking any of the sleep PM callbacks (suspend,
+ ++++++++ * suspend_late, resume, resume_early, etc.).
+ ++++++++ */
+ ++++++++static int omap_iommu_prepare(struct device *dev)
+ ++++++++{
+ ++++++++ if (pm_runtime_status_suspended(dev))
+ ++++++++ return 1;
+ ++++++++ return 0;
+ ++++++++}
+ ++++++++
static bool omap_iommu_can_register(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct omap_iommu *obj;
struct resource *res;
struct device_node *of = pdev->dev.of_node;
+ ++++++++ struct orphan_dev *orphan_dev, *tmp;
if (!of) {
pr_err("%s: only DT-based devices are supported\n", __func__);
if (!obj)
return -ENOMEM;
+ ++++++++ /*
+ ++++++++ * self-manage the ordering dependencies between omap_device_enable/idle
+ ++++++++ * and omap_device_assert/deassert_hardreset API
+ ++++++++ */
+ ++++++++ if (pdev->dev.pm_domain) {
+ ++++++++ dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
+ ++++++++ pdev->dev.pm_domain = NULL;
+ ++++++++ }
+ ++++++++
obj->name = dev_name(&pdev->dev);
obj->nr_tlb_entries = 32;
err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
obj->dev = &pdev->dev;
obj->ctx = (void *)obj + sizeof(*obj);
+ ++++++++ obj->cr_ctx = devm_kzalloc(&pdev->dev,
+ ++++++++ sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
+ ++++++++ GFP_KERNEL);
+ ++++++++ if (!obj->cr_ctx)
+ ++++++++ return -ENOMEM;
spin_lock_init(&obj->iommu_lock);
spin_lock_init(&obj->page_table_lock);
goto out_sysfs;
}
- -------- pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
omap_iommu_debugfs_add(obj);
dev_info(&pdev->dev, "%s registered\n", obj->name);
+ ++++++++ list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) {
+ ++++++++ err = _omap_iommu_add_device(orphan_dev->dev);
+ ++++++++ if (!err) {
+ ++++++++ list_del(&orphan_dev->node);
+ ++++++++ kfree(orphan_dev);
+ ++++++++ }
+ ++++++++ }
+ ++++++++
return 0;
out_sysfs:
return 0;
}
+ ++++++++static const struct dev_pm_ops omap_iommu_pm_ops = {
+ ++++++++ .prepare = omap_iommu_prepare,
+ ++++++++ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ ++++++++ pm_runtime_force_resume)
+ ++++++++ SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
+ ++++++++ omap_iommu_runtime_resume, NULL)
+ ++++++++};
+ ++++++++
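For readers less familiar with the PM macros, the structure above expands, roughly, to the sketch below (assuming CONFIG_PM and CONFIG_PM_SLEEP; the freeze/thaw/poweroff/restore members also set by SET_LATE_SYSTEM_SLEEP_PM_OPS are elided). System sleep is funneled through pm_runtime_force_suspend()/pm_runtime_force_resume(), which reuse the runtime PM callbacks, while .prepare short-circuits devices that are already runtime suspended.

static const struct dev_pm_ops example_expanded_pm_ops = {
	.prepare	 = omap_iommu_prepare,
	.suspend_late	 = pm_runtime_force_suspend,
	.resume_early	 = pm_runtime_force_resume,
	/* ...freeze/thaw/poweroff/restore equivalents omitted... */
	.runtime_suspend = omap_iommu_runtime_suspend,
	.runtime_resume	 = omap_iommu_runtime_resume,
};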
static const struct of_device_id omap_iommu_of_match[] = {
{ .compatible = "ti,omap2-iommu" },
{ .compatible = "ti,omap4-iommu" },
.remove = omap_iommu_remove,
.driver = {
.name = "omap-iommu",
+ ++++++++ .pm = &omap_iommu_pm_ops,
.of_match_table = of_match_ptr(omap_iommu_of_match),
},
};
}
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
--- ---- size_t size)
+++ ++++ size_t size, struct iommu_iotlb_gather *gather)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
return ret;
}
- --------static int omap_iommu_add_device(struct device *dev)
+ ++++++++static int _omap_iommu_add_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data, *tmp;
struct omap_iommu *oiommu;
struct platform_device *pdev;
int num_iommus, i;
int ret;
+ ++++++++ struct orphan_dev *orphan_dev;
+ ++++++++ unsigned long flags;
/*
* Allocate the archdata iommu structure for DT-based devices.
}
pdev = of_find_device_by_node(np);
- -------- if (WARN_ON(!pdev)) {
+ ++++++++ if (!pdev) {
of_node_put(np);
kfree(arch_data);
- -------- return -EINVAL;
+ ++++++++ spin_lock_irqsave(&orphan_lock, flags);
+ ++++++++ list_for_each_entry(orphan_dev, &orphan_dev_list,
+ ++++++++ node) {
+ ++++++++ if (orphan_dev->dev == dev)
+ ++++++++ break;
+ ++++++++ }
+ ++++++++ spin_unlock_irqrestore(&orphan_lock, flags);
+ ++++++++
+ ++++++++ if (orphan_dev && orphan_dev->dev == dev)
+ ++++++++ return -EPROBE_DEFER;
+ ++++++++
+ ++++++++ orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL);
+ ++++++++ if (!orphan_dev)
+ ++++++++ return -ENOMEM;
+ ++++++++ orphan_dev->dev = dev;
+ ++++++++ spin_lock_irqsave(&orphan_lock, flags);
+ ++++++++ list_add(&orphan_dev->node, &orphan_dev_list);
+ ++++++++ spin_unlock_irqrestore(&orphan_lock, flags);
+ ++++++++ return -EPROBE_DEFER;
}
oiommu = platform_get_drvdata(pdev);
}
tmp->iommu_dev = oiommu;
+ ++++++++ tmp->dev = &pdev->dev;
of_node_put(np);
}
return 0;
}
+ ++++++++static int omap_iommu_add_device(struct device *dev)
+ ++++++++{
+ ++++++++ int ret;
+ ++++++++
+ ++++++++ ret = _omap_iommu_add_device(dev);
+ ++++++++ if (ret == -EPROBE_DEFER)
+ ++++++++ return 0;
+ ++++++++
+ ++++++++ return ret;
+ ++++++++}
+ ++++++++
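The orphan-device handling above relies on a few supporting declarations that this excerpt does not show. A sketch of what they are assumed to look like near the top of omap-iommu.c:

struct orphan_dev {
	struct device *dev;
	struct list_head node;
};

static LIST_HEAD(orphan_dev_list);	/* devices waiting for their IOMMU to probe */
static DEFINE_SPINLOCK(orphan_lock);	/* protects orphan_dev_list */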
static void omap_iommu_remove_device(struct device *dev)
{
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
static int __init omap_iommu_init(void)
{
struct kmem_cache *p;
- -------- const unsigned long flags = SLAB_HWCACHE_ALIGN;
+ ++++++++ const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
size_t align = 1 << 10; /* L2 pagetable alignment */
struct device_node *np;
int ret;
*/
#include <linux/atomic.h>
+++ +++++#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
--- -----#include "arm-smmu-regs.h"
+++ +++++#include "arm-smmu.h"
#define SMMU_INTR_SEL_NS 0x2000
struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
size_t s = size;
--- ----- iova &= ~12UL;
+++ +++++ iova = (iova >> 12) << 12;
iova |= ctx->asid;
do {
iommu_writel(ctx, reg, iova);
}
}
--- ---- static const struct iommu_gather_ops qcom_gather_ops = {
+++ ++++ static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+++ ++++ size_t granule, void *cookie)
+++ ++++ {
+++ ++++ qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
+++ ++++ qcom_iommu_tlb_sync(cookie);
+++ ++++ }
+++ ++++
+++ ++++ static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
+++ ++++ size_t granule, void *cookie)
+++ ++++ {
+++ ++++ qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
+++ ++++ qcom_iommu_tlb_sync(cookie);
+++ ++++ }
+++ ++++
+++ ++++ static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+++ ++++ unsigned long iova, size_t granule,
+++ ++++ void *cookie)
+++ ++++ {
+++ ++++ qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+++ ++++ }
+++ ++++
+++ ++++ static const struct iommu_flush_ops qcom_flush_ops = {
.tlb_flush_all = qcom_iommu_tlb_inv_context,
--- ---- .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync,
--- ---- .tlb_sync = qcom_iommu_tlb_sync,
+++ ++++ .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
+++ ++++ .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
+++ ++++ .tlb_add_page = qcom_iommu_tlb_add_page,
};
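As a rough guide to the struct iommu_flush_ops interface used above (semantics as of this series): .tlb_flush_walk invalidates TLB state, including walk-cache entries, for a range after table entries were unmapped; .tlb_flush_leaf invalidates only leaf entries for a range; .tlb_add_page queues the invalidation of a single page during an unmap, to be made effective later by the domain's ->iotlb_sync(). A hypothetical driver that can only invalidate a whole translation context could collapse all of them onto its context flush, for example:

/* placeholder names; not part of the patch */
static void foo_tlb_flush_all(void *cookie)
{
	/* ...write the hardware's "invalidate entire context" register... */
}

static void foo_tlb_flush_range(unsigned long iova, size_t size,
				size_t granule, void *cookie)
{
	foo_tlb_flush_all(cookie);
}

static void foo_tlb_add_page(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t granule, void *cookie)
{
	foo_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops foo_flush_ops = {
	.tlb_flush_all	= foo_tlb_flush_all,
	.tlb_flush_walk	= foo_tlb_flush_range,
	.tlb_flush_leaf	= foo_tlb_flush_range,
	.tlb_add_page	= foo_tlb_add_page,
};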
static irqreturn_t qcom_iommu_fault(int irq, void *dev)
.pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap,
.ias = 32,
.oas = 40,
--- ---- .tlb = &qcom_gather_ops,
+++ ++++ .tlb = &qcom_flush_ops,
.iommu_dev = qcom_iommu->dev,
};
/* TTBRs */
iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
--- ----- ((u64)ctx->asid << TTBRn_ASID_SHIFT));
+++ +++++ FIELD_PREP(TTBRn_ASID, ctx->asid));
iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
--- ----- ((u64)ctx->asid << TTBRn_ASID_SHIFT));
+++ +++++ FIELD_PREP(TTBRn_ASID, ctx->asid));
--- ----- /* TTBCR */
--- ----- iommu_writel(ctx, ARM_SMMU_CB_TTBCR2,
+++ +++++ /* TCR */
+++ +++++ iommu_writel(ctx, ARM_SMMU_CB_TCR2,
(pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
--- ----- TTBCR2_SEP_UPSTREAM);
--- ----- iommu_writel(ctx, ARM_SMMU_CB_TTBCR,
+++ +++++ FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+++ +++++ iommu_writel(ctx, ARM_SMMU_CB_TCR,
pgtbl_cfg.arm_lpae_s1_cfg.tcr);
/* MAIRs (stage-1 only) */
}
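The FIELD_PREP() conversions above assume the register fields are now described as masks (GENMASK()-style definitions in arm-smmu.h) rather than as separate shift constants. A stand-alone illustration of the helper, using a hypothetical mask:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* hypothetical field occupying bits 63:48 of a 64-bit register value */
#define EXAMPLE_ASID	GENMASK_ULL(63, 48)

static u64 pack_ttbr(u64 ttbr, u16 asid)
{
	/*
	 * FIELD_PREP(mask, val) shifts val into the bit positions named by
	 * the mask, so this is equivalent to the old open-coded form
	 * ttbr | ((u64)asid << 48).
	 */
	return ttbr | FIELD_PREP(EXAMPLE_ASID, asid);
}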
static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
--- ---- size_t size)
+++ ++++ size_t size, struct iommu_iotlb_gather *gather)
{
size_t ret;
unsigned long flags;
*/
pm_runtime_get_sync(qcom_domain->iommu->dev);
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
--- ---- ret = ops->unmap(ops, iova, size);
+++ ++++ ret = ops->unmap(ops, iova, size, gather);
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
pm_runtime_put_sync(qcom_domain->iommu->dev);
return ret;
}
--- ---- static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+++ ++++ static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
pm_runtime_put_sync(qcom_domain->iommu->dev);
}
+++ ++++ static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
+++ ++++ struct iommu_iotlb_gather *gather)
+++ ++++ {
+++ ++++ qcom_iommu_flush_iotlb_all(domain);
+++ ++++ }
+++ ++++
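The gather argument threaded through ->unmap() and ->iotlb_sync() above belongs to the IOMMU core's deferred-invalidation API introduced in this cycle. Below is a caller-side sketch of the pattern; the core helper names (iommu_iotlb_gather_init(), iommu_unmap_fast(), iommu_tlb_sync()) are as understood for this kernel generation and should be treated as assumptions:

#include <linux/iommu.h>

static void example_unmap_and_flush(struct iommu_domain *domain,
				    unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	/* the driver's ->unmap() may queue invalidations via ->tlb_add_page() */
	iommu_unmap_fast(domain, iova, size, &gather);
	/* issue whatever was gathered; this lands in the driver's ->iotlb_sync() */
	iommu_tlb_sync(domain, &gather);
}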
static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
.detach_dev = qcom_iommu_detach_dev,
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
--- ---- .flush_iotlb_all = qcom_iommu_iotlb_sync,
+++ ++++ .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.add_device = qcom_iommu_add_device,
return PTR_ERR(ctx->base);
irq = platform_get_irq(pdev, 0);
--------- if (irq < 0) {
--------- dev_err(dev, "failed to get irq\n");
+++++++++ if (irq < 0)
return -ENODEV;
--------- }
/* clear IRQs before registering fault handler, just in case the
* boot-loader left us a surprise:
struct qcom_iommu_dev *qcom_iommu;
struct device *dev = &pdev->dev;
struct resource *res;
----- ---- int ret, sz, max_asid = 0;
+++++ ++++ int ret, max_asid = 0;
/* find the max asid (which is 1:1 to ctx bank idx), so we know how
* many child ctx devices we have:
for_each_child_of_node(dev->of_node, child)
max_asid = max(max_asid, get_asid(child));
----- ---- sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0]));
----- ----
----- ---- qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL);
+++++ ++++ qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
+++++ ++++ GFP_KERNEL);
if (!qcom_iommu)
return -ENOMEM;
qcom_iommu->num_ctxs = max_asid;
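struct_size() (from <linux/overflow.h>) sizes an allocation that ends in a flexible array member: it computes sizeof(*p) + n * sizeof(p->member[0]) and saturates on overflow instead of silently wrapping like the open-coded arithmetic it replaces. A minimal sketch with hypothetical names:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_dev {
	unsigned int num_ctxs;
	void *ctxs[];			/* flexible array member */
};

static struct example_dev *example_alloc(unsigned int n)
{
	struct example_dev *d;

	/* a saturated (SIZE_MAX) size makes the allocation fail cleanly */
	d = kzalloc(struct_size(d, ctxs, n), GFP_KERNEL);
	if (d)
		d->num_ctxs = n;
	return d;
}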
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
++++++++ +#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
++++++++ +#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
---------#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
#define QI_EIOTLB_AM(am) (((u64)am))
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
#define QI_RESP_INVALID 0x1
#define QI_RESP_FAILURE 0xf
---------#define QI_GRAN_ALL_ALL 0
---------#define QI_GRAN_NONG_ALL 1
#define QI_GRAN_NONG_PASID 2
#define QI_GRAN_PSI_PASID 3
{
u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
-- - - if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
-- - - max_dma = dev->bus_dma_mask;
-- - -
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
+++ +++ size_t alloc_size = PAGE_ALIGN(size);
+++ +++ int node = dev_to_node(dev);
struct page *page = NULL;
u64 phys_mask;
gfp &= ~__GFP_ZERO;
gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_mask);
+++ +++ page = dma_alloc_contiguous(dev, alloc_size, gfp);
+++ +++ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+++ +++ dma_free_contiguous(dev, page, alloc_size);
+++ +++ page = NULL;
+++ +++ }
again:
--- --- page = dma_alloc_contiguous(dev, size, gfp);
+++ +++ if (!page)
+++ +++ page = alloc_pages_node(node, gfp, get_order(alloc_size));
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, size);
page = NULL;
if (!page)
return NULL;
-- - - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
++ + + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
++ + + !force_dma_unencrypted(dev)) {
/* remove any dirty cache lines on the kernel alias */
if (!PageHighMem(page))
arch_dma_prep_coherent(page, size);
++ + + *dma_handle = phys_to_dma(dev, page_to_phys(page));
/* return the page pointer as the opaque cookie */
return page;
}
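For context, DMA_ATTR_NO_KERNEL_MAPPING allocations hand back an opaque cookie rather than a usable CPU address, which is why the kernel-alias handling above can be skipped; after this change the shortcut is only taken when the memory does not also need to be unencrypted. A hypothetical caller:

#include <linux/dma-mapping.h>

static void *alloc_device_only_buffer(struct device *dev, size_t size,
				      dma_addr_t *dma_handle)
{
	/*
	 * The return value is an opaque cookie (a struct page pointer inside
	 * dma-direct); it must not be dereferenced by the CPU and is only
	 * meaningful to dma_free_attrs().
	 */
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
			       DMA_ATTR_NO_KERNEL_MAPPING);
}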
{
unsigned int page_order = get_order(size);
-- - - if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
++ + + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
++ + + !force_dma_unencrypted(dev)) {
/* cpu_addr is a struct page cookie, not a kernel address */
__dma_direct_free_pages(dev, size, cpu_addr);
return;
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
if (unlikely(is_swiotlb_buffer(phys)))
-------- - swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
++++++++ + swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);
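The extra size argument in the swiotlb_tbl_unmap_single() call above reflects the split of the swiotlb bounce-buffer API into a mapping size and an allocation size, done for the untrusted-device bounce work; dma-direct simply passes the same value for both. The prototype as assumed here for this series:

void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t mapping_size, size_t alloc_size,
			      enum dma_data_direction dir,
			      unsigned long attrs);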