static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
++++++ static ssize_t deferred_probe_show(struct device *dev,
++++++ struct device_attribute *attr, char *buf)
++++++ {
++++++ bool value;
++++++
++++++ mutex_lock(&deferred_probe_mutex);
++++++ value = !list_empty(&dev->p->deferred_probe);
++++++ mutex_unlock(&deferred_probe_mutex);
++++++
++++++ return sprintf(buf, "%d\n", value);
++++++ }
++++++ DEVICE_ATTR_RO(deferred_probe);
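/*
 * Illustrative userspace sketch (the path argument is a placeholder,
 * not taken from this patch): the new attribute reports a single
 * integer, 1 while the device still sits on the deferred probe list
 * and 0 otherwise.
 */
#include <stdio.h>

static int read_deferred_probe(const char *attr_path)
{
	FILE *f = fopen(attr_path, "r");
	int val;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}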
++++++
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
++++++ device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
{
int ret = -EPROBE_DEFER;
int local_trigger_count = atomic_read(&deferred_trigger_count);
- - -- bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);
+ + ++ bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+ + ++ !drv->suppress_bind_attrs;
if (defer_all_probes) {
/*
return ret;
}
++++++ ret = device_links_check_suppliers(dev);
++++++ if (ret)
++++++ return ret;
++++++
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
if (test_remove) {
test_remove = false;
- - -- if (dev->bus && dev->bus->remove)
+ + ++ if (dev->bus->remove)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
++++++ device_links_no_driver(dev);
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
++++++ pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
if (dev->parent)
pm_runtime_put(dev->parent);
++++++ pm_runtime_put_suppliers(dev);
return ret;
}
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
------ static void __device_release_driver(struct device *dev)
++++++ static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
if (driver_allows_async_probing(drv))
async_synchronize_full();
++++++ while (device_links_busy(dev)) {
++++++ device_unlock(dev);
++++++ if (parent)
++++++ device_unlock(parent);
++++++
++++++ device_links_unbind_consumers(dev);
++++++ if (parent)
++++++ device_lock(parent);
++++++
++++++ device_lock(dev);
++++++ /*
++++++ * A concurrent invocation of the same function might
++++++ * have released the driver successfully while this one
++++++ * was waiting, so check for that.
++++++ */
++++++ if (dev->driver != drv)
++++++ return;
++++++ }
++++++
pm_runtime_get_sync(dev);
++++++ pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
++++++
++++++ device_links_driver_cleanup(dev);
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
}
}
++++++ void device_release_driver_internal(struct device *dev,
++++++ struct device_driver *drv,
++++++ struct device *parent)
++++++ {
++++++ if (parent)
++++++ device_lock(parent);
++++++
++++++ device_lock(dev);
++++++ if (!drv || drv == dev->driver)
++++++ __device_release_driver(dev, parent);
++++++
++++++ device_unlock(dev);
++++++ if (parent)
++++++ device_unlock(parent);
++++++ }
++++++
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
++++++ *
++++++ * If this function is to be called with @dev->parent lock held, ensure that
++++++ * the device's consumers are unbound in advance or that their locks can be
++++++ * acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
------ device_lock(dev);
------ __device_release_driver(dev);
------ device_unlock(dev);
++++++ device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
------
------ if (dev->parent) /* Needed for USB */
------ device_lock(dev->parent);
------ device_lock(dev);
------ if (dev->driver == drv)
------ __device_release_driver(dev);
------ device_unlock(dev);
------ if (dev->parent)
------ device_unlock(dev->parent);
++++++ device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
++++++ dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
++++++ dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
device_for_each_child(dev, &async, dpm_wait_fn);
}
++++++ static void dpm_wait_for_suppliers(struct device *dev, bool async)
++++++ {
++++++ struct device_link *link;
++++++ int idx;
++++++
++++++ idx = device_links_read_lock();
++++++
++++++ /*
++++++ * If the supplier goes away right after we've checked the link to it,
++++++ * we'll wait for its completion to change the state, but that's fine,
++++++ * because the only things that will block as a result are the SRCU
++++++ * callbacks freeing the link objects for the links in the list we're
++++++ * walking.
++++++ */
++++++ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
++++++ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
++++++ dpm_wait(link->supplier, async);
++++++
++++++ device_links_read_unlock(idx);
++++++ }
++++++
++++++ static void dpm_wait_for_superior(struct device *dev, bool async)
++++++ {
++++++ dpm_wait(dev->parent, async);
++++++ dpm_wait_for_suppliers(dev, async);
++++++ }
++++++
++++++ static void dpm_wait_for_consumers(struct device *dev, bool async)
++++++ {
++++++ struct device_link *link;
++++++ int idx;
++++++
++++++ idx = device_links_read_lock();
++++++
++++++ /*
++++++ * The status of a device link can only be changed from "dormant" by a
++++++ * probe, but that cannot happen during system suspend/resume. In
++++++ * theory it can change to "dormant" at that time, but then it is
++++++ * reasonable to wait for the target device anyway (e.g. if it goes
++++++ * away, it's better to wait for it to go away completely and then
++++++ * continue instead of trying to continue in parallel with its
++++++ * unregistration).
++++++ */
++++++ list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
++++++ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
++++++ dpm_wait(link->consumer, async);
++++++
++++++ device_links_read_unlock(idx);
++++++ }
++++++
++++++ static void dpm_wait_for_subordinate(struct device *dev, bool async)
++++++ {
++++++ dpm_wait_for_children(dev, async);
++++++ dpm_wait_for_consumers(dev, async);
++++++ }
++++++
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
if (!dev->power.is_noirq_suspended)
goto Out;
------ dpm_wait(dev->parent, async);
++++++ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
if (!dev->power.is_late_suspended)
goto Out;
------ dpm_wait(dev->parent, async);
++++++ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "early power domain ";
goto Complete;
}
------ dpm_wait(dev->parent, async);
++++++ dpm_wait_for_superior(dev, async);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ + ++ dpm_wait_for_children(dev, async);
+ + ++
if (async_error)
goto Complete;
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- - - dpm_wait_for_children(dev, async);
++++++ dpm_wait_for_subordinate(dev, async);
+ + +
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
__pm_runtime_disable(dev, false);
+ + ++ dpm_wait_for_children(dev, async);
+ + ++
if (async_error)
goto Complete;
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- - - dpm_wait_for_children(dev, async);
++++++ dpm_wait_for_subordinate(dev, async);
+ + +
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
return error;
}
++++++ static void dpm_clear_suppliers_direct_complete(struct device *dev)
++++++ {
++++++ struct device_link *link;
++++++ int idx;
++++++
++++++ idx = device_links_read_lock();
++++++
++++++ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
++++++ spin_lock_irq(&link->supplier->power.lock);
++++++ link->supplier->power.direct_complete = false;
++++++ spin_unlock_irq(&link->supplier->power.lock);
++++++ }
++++++
++++++ device_links_read_unlock(idx);
++++++ }
++++++
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
------ dpm_wait_for_children(dev, async);
++++++ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
spin_unlock_irq(&parent->power.lock);
}
++++++ dpm_clear_suppliers_direct_complete(dev);
}
device_unlock(dev);
#define pr_fmt(fmt) "arm-smmu: " fmt
++ ++++#include <linux/acpi.h>
++ ++++#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
++ ++++#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
#define CB_PAR_F (1 << 0)
#define INVALID_SMENDX -1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
+ + ++#define fwspec_smendx(fw, i) \
+ + ++ (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
- - -- for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
+ + ++ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
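/*
 * Minimal sketch of the guarded iterator in use (the function below is
 * hypothetical, not part of this patch). Because the comma expression
 * evaluates the smendx lookup before testing i < fw->num_ids, the old
 * macro read one element past the end of smendx[] on the terminating
 * iteration; fwspec_smendx() now returns INVALID_SMENDX there instead.
 */
static void example_walk_smes(struct iommu_fwspec *fwspec)
{
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (idx == INVALID_SMENDX)
			continue;	/* no stream-mapping entry for this ID */
		/* ... operate on stream-mapping entry 'idx' ... */
	}
}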
struct arm_smmu_device {
struct device *dev;
}
}
-- ----static struct iommu_gather_ops arm_smmu_gather_ops = {
++ ++++static const struct iommu_gather_ops arm_smmu_gather_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync,
return -ENXIO;
}
+ + ++ /*
+ + ++ * FIXME: The arch/arm DMA API code tries to attach devices to its own
+ + ++ * domains between of_xlate() and add_device() - we have no way to cope
+ + ++ * with that, so until ARM gets converted to rely on groups and default
+ + ++ * domains, just say no (but more politely than by dereferencing NULL).
+ + ++ * This should be at least a WARN_ON once that's sorted.
+ + ++ */
+ + ++ if (!fwspec->iommu_priv)
+ + ++ return -ENODEV;
+ + ++
smmu = fwspec_smmu(fwspec);
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
static int arm_smmu_match_node(struct device *dev, void *data)
{
-- ---- return dev->of_node == data;
++ ++++ return dev->fwnode == data;
}
-- ----static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
++ ++++static
++ ++++struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
-- ---- np, arm_smmu_match_node);
++ ++++ fwnode, arm_smmu_match_node);
put_device(dev);
return dev ? dev_get_drvdata(dev) : NULL;
}
fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
- - -- } else if (fwspec) {
- - -- smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+ + ++ } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
- - smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
++ ++++ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
return -ENODEV;
}
}
if (group)
----- - return group;
+++++ + return iommu_group_ref_get(group);
if (dev_is_pci(dev))
group = pci_device_group(dev);
for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);
-- ---- /*
-- ---- * Before clearing ARM_MMU500_ACTLR_CPRE, need to
-- ---- * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
-- ---- * bit is only present in MMU-500r2 onwards.
-- ---- */
-- ---- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
-- ---- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
-- ---- if ((smmu->model == ARM_MMU500) && (major >= 2)) {
++ ++++ if (smmu->model == ARM_MMU500) {
++ ++++ /*
++ ++++ * Before clearing ARM_MMU500_ACTLR_CPRE, the
++ ++++ * CACHE_LOCK bit of ACR must be cleared first; note that
++ ++++ * the CACHE_LOCK bit is only present in MMU-500r2 onwards.
++ ++++ */
++ ++++ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
++ ++++ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
-- ---- reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
++ ++++ if (major >= 2)
++ ++++ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
++ ++++ /*
++ ++++ * Allow unmatched Stream IDs to allocate bypass
++ ++++ * TLB entries for reduced latency.
++ ++++ */
++ ++++ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}
unsigned long size;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
u32 id;
-- ---- bool cttw_dt, cttw_reg;
++ ++++ bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
int i;
dev_notice(smmu->dev, "probing hardware configuration...\n");
/*
* In order for DMA API calls to work properly, we must defer to what
-- ---- * the DT says about coherency, regardless of what the hardware claims.
++ ++++ * the FW says about coherency, regardless of what the hardware claims.
* Fortunately, this also opens up a workaround for systems where the
* ID register value has ended up configured incorrectly.
*/
-- ---- cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
cttw_reg = !!(id & ID0_CTTW);
-- ---- if (cttw_dt)
-- ---- smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
-- ---- if (cttw_dt || cttw_reg)
++ ++++ if (cttw_fw || cttw_reg)
dev_notice(smmu->dev, "\t%scoherent table walk\n",
-- ---- cttw_dt ? "" : "non-");
-- ---- if (cttw_dt != cttw_reg)
++ ++++ cttw_fw ? "" : "non-");
++ ++++ if (cttw_fw != cttw_reg)
dev_notice(smmu->dev,
-- ---- "\t(IDR0.CTTW overridden by dma-coherent property)\n");
++ ++++ "\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
-- ----static int arm_smmu_device_dt_probe(struct platform_device *pdev)
++ ++++#ifdef CONFIG_ACPI
++ ++++static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
++ ++++{
++ ++++ int ret = 0;
++ ++++
++ ++++ switch (model) {
++ ++++ case ACPI_IORT_SMMU_V1:
++ ++++ case ACPI_IORT_SMMU_CORELINK_MMU400:
++ ++++ smmu->version = ARM_SMMU_V1;
++ ++++ smmu->model = GENERIC_SMMU;
++ ++++ break;
++ ++++ case ACPI_IORT_SMMU_V2:
++ ++++ smmu->version = ARM_SMMU_V2;
++ ++++ smmu->model = GENERIC_SMMU;
++ ++++ break;
++ ++++ case ACPI_IORT_SMMU_CORELINK_MMU500:
++ ++++ smmu->version = ARM_SMMU_V2;
++ ++++ smmu->model = ARM_MMU500;
++ ++++ break;
++ ++++ default:
++ ++++ ret = -ENODEV;
++ ++++ }
++ ++++
++ ++++ return ret;
++ ++++}
++ ++++
++ ++++static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
++ ++++ struct arm_smmu_device *smmu)
++ ++++{
++ ++++ struct device *dev = smmu->dev;
++ ++++ struct acpi_iort_node *node =
++ ++++ *(struct acpi_iort_node **)dev_get_platdata(dev);
++ ++++ struct acpi_iort_smmu *iort_smmu;
++ ++++ int ret;
++ ++++
++ ++++ /* Retrieve SMMU1/2 specific data */
++ ++++ iort_smmu = (struct acpi_iort_smmu *)node->node_data;
++ ++++
++ ++++ ret = acpi_smmu_get_data(iort_smmu->model, smmu);
++ ++++ if (ret < 0)
++ ++++ return ret;
++ ++++
++ ++++ /* Ignore the configuration access interrupt */
++ ++++ smmu->num_global_irqs = 1;
++ ++++
++ ++++ if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
++ ++++ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
++ ++++
++ ++++ return 0;
++ ++++}
++ ++++#else
++ ++++static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
++ ++++ struct arm_smmu_device *smmu)
++ ++++{
++ ++++ return -ENODEV;
++ ++++}
++ ++++#endif
++ ++++
++ ++++static int arm_smmu_device_dt_probe(struct platform_device *pdev,
++ ++++ struct arm_smmu_device *smmu)
{
const struct arm_smmu_match_data *data;
-- ---- struct resource *res;
-- ---- struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
-- ---- int num_irqs, i, err;
bool legacy_binding;
++ ++++ if (of_property_read_u32(dev->of_node, "#global-interrupts",
++ ++++ &smmu->num_global_irqs)) {
++ ++++ dev_err(dev, "missing #global-interrupts property\n");
++ ++++ return -ENODEV;
++ ++++ }
++ ++++
++ ++++ data = of_device_get_match_data(dev);
++ ++++ smmu->version = data->version;
++ ++++ smmu->model = data->model;
++ ++++
++ ++++ parse_driver_options(smmu);
++ ++++
legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
if (legacy_binding && !using_generic_binding) {
if (!using_legacy_binding)
return -ENODEV;
}
++ ++++ if (of_dma_is_coherent(dev->of_node))
++ ++++ smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
++ ++++
++ ++++ return 0;
++ ++++}
++ ++++
++ ++++static int arm_smmu_device_probe(struct platform_device *pdev)
++ ++++{
++ ++++ struct resource *res;
++ ++++ struct arm_smmu_device *smmu;
++ ++++ struct device *dev = &pdev->dev;
++ ++++ int num_irqs, i, err;
++ ++++
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
dev_err(dev, "failed to allocate arm_smmu_device\n");
}
smmu->dev = dev;
-- ---- data = of_device_get_match_data(dev);
-- ---- smmu->version = data->version;
-- ---- smmu->model = data->model;
++ ++++ if (dev->of_node)
++ ++++ err = arm_smmu_device_dt_probe(pdev, smmu);
++ ++++ else
++ ++++ err = arm_smmu_device_acpi_probe(pdev, smmu);
++ ++++
++ ++++ if (err)
++ ++++ return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
smmu->base = devm_ioremap_resource(dev, res);
return PTR_ERR(smmu->base);
smmu->size = resource_size(res);
-- ---- if (of_property_read_u32(dev->of_node, "#global-interrupts",
-- ---- &smmu->num_global_irqs)) {
-- ---- dev_err(dev, "missing #global-interrupts property\n");
-- ---- return -ENODEV;
-- ---- }
-- ----
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
num_irqs++;
if (err)
return err;
-- ---- parse_driver_options(smmu);
-- ----
if (smmu->version == ARM_SMMU_V2 &&
smmu->num_context_banks != smmu->num_context_irqs) {
dev_err(dev,
}
}
-- ---- of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
++ ++++ iommu_register_instance(dev->fwnode, &arm_smmu_ops);
platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
.name = "arm-smmu",
.of_match_table = of_match_ptr(arm_smmu_of_match),
},
-- ---- .probe = arm_smmu_device_dt_probe,
++ ++++ .probe = arm_smmu_device_probe,
.remove = arm_smmu_device_remove,
};
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
++ ++++#ifdef CONFIG_ACPI
++ ++++static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
++ ++++{
++ ++++ if (iort_node_match(ACPI_IORT_NODE_SMMU))
++ ++++ return arm_smmu_init();
++ ++++
++ ++++ return 0;
++ ++++}
++ ++++IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
++ ++++#endif
++ ++++
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_LICENSE("GPL v2");
}
EXPORT_SYMBOL_GPL(iommu_group_get);
+++++ +/**
+++++ + * iommu_group_ref_get - Increment reference on a group
+++++ + * @group: the group to use, must not be NULL
+++++ + *
+++++ + * This function is called by iommu drivers to take additional references on an
+++++ + * existing group. Returns the given group for convenience.
+++++ + */
+++++ +struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
+++++ +{
+++++ + kobject_get(group->devices_kobj);
+++++ + return group;
+++++ +}
+++++ +
/**
* iommu_group_put - Decrement group reference
* @group: the group to use
return ret;
}
++ ++++struct iommu_instance {
++ ++++ struct list_head list;
++ ++++ struct fwnode_handle *fwnode;
++ ++++ const struct iommu_ops *ops;
++ ++++};
++ ++++static LIST_HEAD(iommu_instance_list);
++ ++++static DEFINE_SPINLOCK(iommu_instance_lock);
++ ++++
++ ++++void iommu_register_instance(struct fwnode_handle *fwnode,
++ ++++ const struct iommu_ops *ops)
++ ++++{
++ ++++ struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
++ ++++
++ ++++ if (WARN_ON(!iommu))
++ ++++ return;
++ ++++
++ ++++ of_node_get(to_of_node(fwnode));
++ ++++ INIT_LIST_HEAD(&iommu->list);
++ ++++ iommu->fwnode = fwnode;
++ ++++ iommu->ops = ops;
++ ++++ spin_lock(&iommu_instance_lock);
++ ++++ list_add_tail(&iommu->list, &iommu_instance_list);
++ ++++ spin_unlock(&iommu_instance_lock);
++ ++++}
++ ++++
++ ++++const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
++ ++++{
++ ++++ struct iommu_instance *instance;
++ ++++ const struct iommu_ops *ops = NULL;
++ ++++
++ ++++ spin_lock(&iommu_instance_lock);
++ ++++ list_for_each_entry(instance, &iommu_instance_list, list)
++ ++++ if (instance->fwnode == fwnode) {
++ ++++ ops = instance->ops;
++ ++++ break;
++ ++++ }
++ ++++ spin_unlock(&iommu_instance_lock);
++ ++++ return ops;
++ ++++}
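/*
 * Sketch of the intended pairing (the helper below is hypothetical,
 * not part of this patch): an IOMMU driver registers its ops against
 * its own fwnode at probe time, and firmware-parsing code can later
 * resolve the same fwnode back to those ops while configuring a
 * master device sitting behind that IOMMU.
 */
static int example_configure_master(struct device *dev,
				    struct fwnode_handle *iommu_fwnode)
{
	const struct iommu_ops *ops = iommu_get_instance(iommu_fwnode);

	if (!ops)
		return -EPROBE_DEFER;	/* the IOMMU driver has not probed yet */

	return iommu_fwspec_init(dev, iommu_fwnode, ops);
}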
++ ++++
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops)
{
static void mtk_iommu_config(struct mtk_iommu_data *data,
struct device *dev, bool enable)
{
- ----- struct mtk_iommu_client_priv *head, *cur, *next;
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
+ +++++ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ +++++ int i;
- ----- head = dev->archdata.iommu;
- ----- list_for_each_entry_safe(cur, next, &head->client, client) {
- ----- larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
- ----- portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
+ +++++ for (i = 0; i < fwspec->num_ids; ++i) {
+ +++++ larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
+ +++++ portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
larb_mmu = &data->smi_imu.larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
struct device *dev)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- ----- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- ----- struct mtk_iommu_data *data;
+ +++++ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
int ret;
- ----- if (!priv)
+ +++++ if (!data)
return -ENODEV;
- ----- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_dom) {
data->m4u_dom = dom;
ret = mtk_iommu_domain_finalise(data);
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- ----- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- ----- struct mtk_iommu_data *data;
+ +++++ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- ----- if (!priv)
+ +++++ if (!data)
return;
- ----- data = dev_get_drvdata(priv->m4udev);
mtk_iommu_config(data, dev, false);
}
{
struct iommu_group *group;
- ----- if (!dev->archdata.iommu) /* Not a iommu client device */
- ----- return -ENODEV;
+ +++++ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+ + ++ return -ENODEV; /* Not an iommu client device */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
static void mtk_iommu_remove_device(struct device *dev)
{
- ----- struct mtk_iommu_client_priv *head, *cur, *next;
- -----
- ----- head = dev->archdata.iommu;
- ----- if (!head)
+ +++++ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
- ----- list_for_each_entry_safe(cur, next, &head->client, client) {
- ----- list_del(&cur->client);
- ----- kfree(cur);
- ----- }
- ----- kfree(head);
- ----- dev->archdata.iommu = NULL;
- -----
iommu_group_remove_device(dev);
+ +++++ iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
- ----- struct mtk_iommu_data *data;
- ----- struct mtk_iommu_client_priv *priv;
+ +++++ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- ----- priv = dev->archdata.iommu;
- ----- if (!priv)
+ +++++ if (!data)
return ERR_PTR(-ENODEV);
/* All the client devices are in the same m4u iommu-group */
- ----- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_group) {
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+++++ + } else {
+++++ + iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- ----- struct mtk_iommu_client_priv *head, *priv, *next;
struct platform_device *m4updev;
if (args->args_count != 1) {
return -EINVAL;
}
- ----- if (!dev->archdata.iommu) {
+ +++++ if (!dev->iommu_fwspec->iommu_priv) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- ----- head = kzalloc(sizeof(*head), GFP_KERNEL);
- ----- if (!head)
- ----- return -ENOMEM;
- -----
- ----- dev->archdata.iommu = head;
- ----- INIT_LIST_HEAD(&head->client);
- ----- head->m4udev = &m4updev->dev;
- ----- } else {
- ----- head = dev->archdata.iommu;
+ +++++ dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
}
- ----- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- ----- if (!priv)
- ----- goto err_free_mem;
- -----
- ----- priv->mtk_m4u_id = args->args[0];
- ----- list_add_tail(&priv->client, &head->client);
- -----
- ----- return 0;
- -----
- -----err_free_mem:
- ----- list_for_each_entry_safe(priv, next, &head->client, client)
- ----- kfree(priv);
- ----- kfree(head);
- ----- dev->archdata.iommu = NULL;
- ----- return -ENOMEM;
+ +++++ return iommu_fwspec_add_ids(dev, args->args, 1);
}
static struct iommu_ops mtk_iommu_ops = {
continue;
plarbdev = of_find_device_by_node(larbnode);
- ----- of_node_put(larbnode);
if (!plarbdev) {
plarbdev = of_platform_device_create(
larbnode, NULL,
platform_bus_type.dev_root);
- ----- if (!plarbdev)
+ +++++ if (!plarbdev) {
+ +++++ of_node_put(larbnode);
return -EPROBE_DEFER;
+ +++++ }
}
data->smi_imu.larb_imu[i].dev = &plarbdev->dev;
- ----- component_match_add(dev, &match, compare_of, larbnode);
+ +++++ component_match_add_release(dev, &match, release_of,
+ +++++ compare_of, larbnode);
}
platform_set_drvdata(pdev, data);
static void mtk_iommu_config(struct mtk_iommu_data *data,
struct device *dev, bool enable)
{
- ----- struct mtk_iommu_client_priv *head, *cur, *next;
struct mtk_smi_larb_iommu *larb_mmu;
unsigned int larbid, portid;
+ +++++ struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ +++++ int i;
- ----- head = dev->archdata.iommu;
- ----- list_for_each_entry_safe(cur, next, &head->client, client) {
- ----- larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
- ----- portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
+ +++++ for (i = 0; i < fwspec->num_ids; ++i) {
+ +++++ larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
+ +++++ portid = mt2701_m4u_to_port(fwspec->ids[i]);
larb_mmu = &data->smi_imu.larb_imu[larbid];
dev_dbg(dev, "%s iommu port: %d\n",
struct device *dev)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- ----- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- ----- struct mtk_iommu_data *data;
+ +++++ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
int ret;
- ----- if (!priv)
+ +++++ if (!data)
return -ENODEV;
- ----- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_dom) {
data->m4u_dom = dom;
ret = mtk_iommu_domain_finalise(data);
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- ----- struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
- ----- struct mtk_iommu_data *data;
+ +++++ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- ----- if (!priv)
+ +++++ if (!data)
return;
- ----- data = dev_get_drvdata(priv->m4udev);
mtk_iommu_config(data, dev, false);
}
return pa;
}
+ +++++static struct iommu_ops mtk_iommu_ops;
+ +++++
/*
* MTK generation one iommu HW only supports one iommu domain, and all the
* clients share the same iova address space.
static int mtk_iommu_create_mapping(struct device *dev,
struct of_phandle_args *args)
{
- ----- struct mtk_iommu_client_priv *head, *priv, *next;
+ +++++ struct mtk_iommu_data *data;
struct platform_device *m4updev;
struct dma_iommu_mapping *mtk_mapping;
struct device *m4udev;
return -EINVAL;
}
- ----- if (!dev->archdata.iommu) {
+ +++++ if (!dev->iommu_fwspec) {
+ +++++ ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
+ +++++ if (ret)
+ +++++ return ret;
+ +++++ } else if (dev->iommu_fwspec->ops != &mtk_iommu_ops) {
+ +++++ return -EINVAL;
+ +++++ }
+ +++++
+ +++++ if (!dev->iommu_fwspec->iommu_priv) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- ----- head = kzalloc(sizeof(*head), GFP_KERNEL);
- ----- if (!head)
- ----- return -ENOMEM;
- -----
- ----- dev->archdata.iommu = head;
- ----- INIT_LIST_HEAD(&head->client);
- ----- head->m4udev = &m4updev->dev;
- ----- } else {
- ----- head = dev->archdata.iommu;
+ +++++ dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
}
- ----- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- ----- if (!priv) {
- ----- ret = -ENOMEM;
- ----- goto err_free_mem;
- ----- }
- ----- priv->mtk_m4u_id = args->args[0];
- ----- list_add_tail(&priv->client, &head->client);
+ +++++ ret = iommu_fwspec_add_ids(dev, args->args, 1);
+ +++++ if (ret)
+ +++++ return ret;
- ----- m4udev = head->m4udev;
+ +++++ data = dev->iommu_fwspec->iommu_priv;
+ +++++ m4udev = data->dev;
mtk_mapping = m4udev->archdata.iommu;
if (!mtk_mapping) {
/* MTK iommu support 4GB iova address space. */
mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
0, 1ULL << 32);
- ----- if (IS_ERR(mtk_mapping)) {
- ----- ret = PTR_ERR(mtk_mapping);
- ----- goto err_free_mem;
- ----- }
+ +++++ if (IS_ERR(mtk_mapping))
+ +++++ return PTR_ERR(mtk_mapping);
+ +++++
m4udev->archdata.iommu = mtk_mapping;
}
err_release_mapping:
arm_iommu_release_mapping(mtk_mapping);
m4udev->archdata.iommu = NULL;
- -----err_free_mem:
- ----- list_for_each_entry_safe(priv, next, &head->client, client)
- ----- kfree(priv);
- ----- kfree(head);
- ----- dev->archdata.iommu = NULL;
return ret;
}
of_node_put(iommu_spec.np);
}
- ----- if (!dev->archdata.iommu) /* Not a iommu client device */
- ----- return -ENODEV;
+ +++++ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+ +++++ return -ENODEV; /* Not an iommu client device */
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
static void mtk_iommu_remove_device(struct device *dev)
{
- ----- struct mtk_iommu_client_priv *head, *cur, *next;
- -----
- ----- head = dev->archdata.iommu;
- ----- if (!head)
+ +++++ if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
- ----- list_for_each_entry_safe(cur, next, &head->client, client) {
- ----- list_del(&cur->client);
- ----- kfree(cur);
- ----- }
- ----- kfree(head);
- ----- dev->archdata.iommu = NULL;
- -----
iommu_group_remove_device(dev);
+ +++++ iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
- ----- struct mtk_iommu_data *data;
- ----- struct mtk_iommu_client_priv *priv;
+ +++++ struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
- ----- priv = dev->archdata.iommu;
- ----- if (!priv)
+ +++++ if (!data)
return ERR_PTR(-ENODEV);
/* All the client devices are in the same m4u iommu-group */
- ----- data = dev_get_drvdata(priv->m4udev);
if (!data->m4u_group) {
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+++++ + } else {
+++++ + iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
continue;
plarbdev = of_find_device_by_node(larb_spec.np);
- ----- of_node_put(larb_spec.np);
if (!plarbdev) {
plarbdev = of_platform_device_create(
larb_spec.np, NULL,
platform_bus_type.dev_root);
- ----- if (!plarbdev)
+ +++++ if (!plarbdev) {
+ +++++ of_node_put(larb_spec.np);
return -EPROBE_DEFER;
+ +++++ }
}
data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
- ----- component_match_add(dev, &match, compare_of, larb_spec.np);
+ +++++ component_match_add_release(dev, &match, release_of,
+ +++++ compare_of, larb_spec.np);
larb_nr++;
}
dev_warn(&dev->dev, "PCI-X settings not supported\n");
}
++++++static bool pcie_root_rcb_set(struct pci_dev *dev)
++++++{
++++++ struct pci_dev *rp = pcie_find_root_port(dev);
++++++ u16 lnkctl;
++++++
++++++ if (!rp)
++++++ return false;
++++++
++++++ pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
++++++ if (lnkctl & PCI_EXP_LNKCTL_RCB)
++++++ return true;
++++++
++++++ return false;
++++++}
++++++
static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
int pos;
~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
/* Initialize Link Control Register */
------ if (pcie_cap_has_lnkctl(dev))
++++++ if (pcie_cap_has_lnkctl(dev)) {
++++++
++++++ /*
++++++ * If the Root Port supports Read Completion Boundary of
++++++ * 128, set RCB to 128. Otherwise, clear it.
++++++ */
++++++ hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
++++++ hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
++++++ if (pcie_root_rcb_set(dev))
++++++ hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
++++++
pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
++++++ }
/* Find Advanced Error Reporting Enhanced Capability */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (attr == DEV_DMA_NOT_SUPPORTED)
dev_warn(&dev->dev, "DMA not supported.\n");
else
-- ---- arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
-- ---- attr == DEV_DMA_COHERENT);
++ ++++ acpi_dma_configure(&dev->dev, attr);
}
pci_put_host_bridge_device(bridge);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
+++++ +extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
++ ++++void iommu_register_instance(struct fwnode_handle *fwnode,
++ ++++ const struct iommu_ops *ops);
++ ++++const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
#else /* CONFIG_IOMMU_API */
return -ENODEV;
}
++ ++++static inline void iommu_register_instance(struct fwnode_handle *fwnode,
++ ++++ const struct iommu_ops *ops)
++ ++++{
++ ++++}
++ ++++
++ ++++static inline
++ ++++const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
++ ++++{
++ ++++ return NULL;
++ ++++}
++ ++++
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */