#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/err.h>
}
EXPORT_SYMBOL_GPL(platform_get_resource);
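+/**
+ * platform_get_mem_or_io - get a resource of type IORESOURCE_MEM or
+ *                          IORESOURCE_IO from a device
+ * @dev: platform device
+ * @num: index among the device's MEM and IO resources
+ *
+ * Return: the matching resource, or NULL if there is none.
+ */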
+struct resource *platform_get_mem_or_io(struct platform_device *dev,
+ unsigned int num)
+{
+ u32 i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
+
#ifdef CONFIG_HAS_IOMEM
/**
* devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
}
EXPORT_SYMBOL_GPL(platform_irq_count);
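+ /* devres payload: the mapped linux IRQ numbers to release with the device */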
+ struct irq_affinity_devres {
+ unsigned int count;
+ unsigned int irq[];
+ };
+
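+ /*
+ * For ACPI-enumerated devices, flag the IRQ resource as disabled again so
+ * that a later platform_get_irq() can remap it.
+ */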
+ static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
+ {
+ struct resource *r;
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
+ if (r)
+ irqresource_disabled(r, 0);
+ }
+
+ static void devm_platform_get_irqs_affinity_release(struct device *dev,
+ void *res)
+ {
+ struct irq_affinity_devres *ptr = res;
+ int i;
+
+ for (i = 0; i < ptr->count; i++) {
+ irq_dispose_mapping(ptr->irq[i]);
+
+ if (has_acpi_companion(dev))
+ platform_disable_acpi_irq(to_platform_device(dev), i);
+ }
+ }
+
+ /**
+ * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
+ * device using an interrupt affinity descriptor
+ * @dev: platform device pointer
+ * @affd: affinity descriptor
+ * @minvec: minimum count of interrupt vectors
+ * @maxvec: maximum count of interrupt vectors
+ * @irqs: pointer holder for IRQ numbers
+ *
+ * Gets a set of IRQs for a platform device, and updates the IRQ affinity
+ * according to the passed affinity descriptor.
+ *
+ * Return: Number of vectors on success, negative error number on failure.
+ */
+ int devm_platform_get_irqs_affinity(struct platform_device *dev,
+ struct irq_affinity *affd,
+ unsigned int minvec,
+ unsigned int maxvec,
+ int **irqs)
+ {
+ struct irq_affinity_devres *ptr;
+ struct irq_affinity_desc *desc;
+ size_t size;
+ int i, ret, nvec;
+
+ if (!affd)
+ return -EPERM;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ nvec = platform_irq_count(dev);
+
+ if (nvec < minvec)
+ return -ENOSPC;
+
+ nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
+ if (nvec < minvec)
+ return -ENOSPC;
+
+ if (nvec > maxvec)
+ nvec = maxvec;
+
+ size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
+ ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ptr->count = nvec;
+
+ for (i = 0; i < nvec; i++) {
+ int irq = platform_get_irq(dev, i);
+ if (irq < 0) {
+ ret = irq;
+ goto err_free_devres;
+ }
+ ptr->irq[i] = irq;
+ }
+
+ desc = irq_create_affinity_masks(nvec, affd);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto err_free_devres;
+ }
+
+ for (i = 0; i < nvec; i++) {
+ ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
+ if (ret) {
+ dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
+ ptr->irq[i], ret);
+ goto err_free_desc;
+ }
+ }
+
+ devres_add(&dev->dev, ptr);
+
+ kfree(desc);
+
+ *irqs = ptr->irq;
+
+ return nvec;
+
+ err_free_desc:
+ kfree(desc);
+ err_free_devres:
+ devres_free(ptr);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
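+
+ /*
+ * Illustrative sketch only (not part of this change): a driver probe might
+ * spread its completion interrupts over CPUs like this; "my_handler",
+ * "my_data" and MY_MAX_VECS are hypothetical.
+ *
+ *	struct irq_affinity affd = { .pre_vectors = 1 };
+ *	int *irqs, nvec, i;
+ *
+ *	nvec = devm_platform_get_irqs_affinity(pdev, &affd, 2, MY_MAX_VECS,
+ *						&irqs);
+ *	if (nvec < 0)
+ *		return nvec;
+ *	for (i = 0; i < nvec; i++)
+ *		devm_request_irq(&pdev->dev, irqs[i], my_handler, 0,
+ *				 dev_name(&pdev->dev), my_data);
+ */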
+
/**
* platform_get_resource_byname - get a resource for a device by name
* @dev: platform device
}
EXPORT_SYMBOL_GPL(platform_device_register_full);
-static int platform_drv_probe(struct device *_dev)
-{
- struct platform_driver *drv = to_platform_driver(_dev->driver);
- struct platform_device *dev = to_platform_device(_dev);
- int ret;
-
- ret = of_clk_set_defaults(_dev->of_node, false);
- if (ret < 0)
- return ret;
-
- ret = dev_pm_domain_attach(_dev, true);
- if (ret)
- goto out;
-
- if (drv->probe) {
- ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
- }
-
-out:
- if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
- dev_warn(_dev, "probe deferral not supported\n");
- ret = -ENXIO;
- }
-
- return ret;
-}
-
-static int platform_drv_probe_fail(struct device *_dev)
-{
- return -ENXIO;
-}
-
-static int platform_drv_remove(struct device *_dev)
-{
- struct platform_driver *drv = to_platform_driver(_dev->driver);
- struct platform_device *dev = to_platform_device(_dev);
- int ret = 0;
-
- if (drv->remove)
- ret = drv->remove(dev);
- dev_pm_domain_detach(_dev, true);
-
- return ret;
-}
-
-static void platform_drv_shutdown(struct device *_dev)
-{
- struct platform_driver *drv = to_platform_driver(_dev->driver);
- struct platform_device *dev = to_platform_device(_dev);
-
- if (drv->shutdown)
- drv->shutdown(dev);
-}
-
/**
* __platform_driver_register - register a driver for platform-level devices
* @drv: platform driver structure
{
drv->driver.owner = owner;
drv->driver.bus = &platform_bus_type;
- drv->driver.probe = platform_drv_probe;
- drv->driver.remove = platform_drv_remove;
- drv->driver.shutdown = platform_drv_shutdown;
return driver_register(&drv->driver);
}
}
EXPORT_SYMBOL_GPL(platform_driver_unregister);
+static int platform_probe_fail(struct platform_device *pdev)
+{
+ return -ENXIO;
+}
+
/**
* __platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
* new devices fail.
*/
spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
- drv->probe = NULL;
+ drv->probe = platform_probe_fail;
if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
retval = -ENODEV;
- drv->driver.probe = platform_drv_probe_fail;
spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
if (code != retval)
}
EXPORT_SYMBOL_GPL(platform_unregister_drivers);
-/* modalias support enables more hands-off userspace setup:
- * (a) environment variable lets new-style hotplug events work once system is
- * fully running: "modprobe $MODALIAS"
- * (b) sysfs attribute lets new-style coldplug recover from hotplug events
- * mishandled before system is fully running: "modprobe $(cat modalias)"
- */
-static ssize_t modalias_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- int len;
-
- len = of_device_modalias(dev, buf, PAGE_SIZE);
- if (len != -ENODEV)
- return len;
-
- len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
- if (len != -ENODEV)
- return len;
-
- return sysfs_emit(buf, "platform:%s\n", pdev->name);
-}
-static DEVICE_ATTR_RO(modalias);
-
-static ssize_t driver_override_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct platform_device *pdev = to_platform_device(dev);
- char *driver_override, *old, *cp;
-
- /* We need to keep extra room for a newline */
- if (count >= (PAGE_SIZE - 1))
- return -EINVAL;
-
- driver_override = kstrndup(buf, count, GFP_KERNEL);
- if (!driver_override)
- return -ENOMEM;
-
- cp = strchr(driver_override, '\n');
- if (cp)
- *cp = '\0';
-
- device_lock(dev);
- old = pdev->driver_override;
- if (strlen(driver_override)) {
- pdev->driver_override = driver_override;
- } else {
- kfree(driver_override);
- pdev->driver_override = NULL;
- }
- device_unlock(dev);
-
- kfree(old);
-
- return count;
-}
-
-static ssize_t driver_override_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- ssize_t len;
-
- device_lock(dev);
- len = sysfs_emit(buf, "%s\n", pdev->driver_override);
- device_unlock(dev);
-
- return len;
-}
-static DEVICE_ATTR_RW(driver_override);
-
-static ssize_t numa_node_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "%d\n", dev_to_node(dev));
-}
-static DEVICE_ATTR_RO(numa_node);
-
-static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
- int n)
-{
- struct device *dev = container_of(kobj, typeof(*dev), kobj);
-
- if (a == &dev_attr_numa_node.attr &&
- dev_to_node(dev) == NUMA_NO_NODE)
- return 0;
-
- return a->mode;
-}
-
-static struct attribute *platform_dev_attrs[] = {
- &dev_attr_modalias.attr,
- &dev_attr_numa_node.attr,
- &dev_attr_driver_override.attr,
- NULL,
-};
-
-static struct attribute_group platform_dev_group = {
- .attrs = platform_dev_attrs,
- .is_visible = platform_dev_attrs_visible,
-};
-__ATTRIBUTE_GROUPS(platform_dev);
-
-static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- struct platform_device *pdev = to_platform_device(dev);
- int rc;
-
- /* Some devices have extra OF data and an OF-style MODALIAS */
- rc = of_device_uevent_modalias(dev, env);
- if (rc != -ENODEV)
- return rc;
-
- rc = acpi_device_uevent_modalias(dev, env);
- if (rc != -ENODEV)
- return rc;
-
- add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
- pdev->name);
- return 0;
-}
-
static const struct platform_device_id *platform_match_id(
const struct platform_device_id *id,
struct platform_device *pdev)
return NULL;
}
-/**
- * platform_match - bind platform device to platform driver.
- * @dev: device.
- * @drv: driver.
- *
- * Platform device IDs are assumed to be encoded like this:
- * "<name><instance>", where <name> is a short description of the type of
- * device, like "pci" or "floppy", and <instance> is the enumerated
- * instance of the device, like '0' or '42'. Driver IDs are simply
- * "<name>". So, extract the <name> from the platform_device structure,
- * and compare it against the name of the driver. Return whether they match
- * or not.
- */
-static int platform_match(struct device *dev, struct device_driver *drv)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct platform_driver *pdrv = to_platform_driver(drv);
-
- /* When driver_override is set, only bind to the matching driver */
- if (pdev->driver_override)
- return !strcmp(pdev->driver_override, drv->name);
-
- /* Attempt an OF style match first */
- if (of_driver_match_device(dev, drv))
- return 1;
-
- /* Then try ACPI style match */
- if (acpi_driver_match_device(dev, drv))
- return 1;
-
- /* Then try to match against the id table */
- if (pdrv->id_table)
- return platform_match_id(pdrv->id_table, pdev) != NULL;
-
- /* fall-back to driver name match */
- return (strcmp(pdev->name, drv->name) == 0);
-}
-
#ifdef CONFIG_PM_SLEEP
static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
#endif /* CONFIG_HIBERNATE_CALLBACKS */
+/* modalias support enables more hands-off userspace setup:
+ * (a) environment variable lets new-style hotplug events work once system is
+ * fully running: "modprobe $MODALIAS"
+ * (b) sysfs attribute lets new-style coldplug recover from hotplug events
+ * mishandled before system is fully running: "modprobe $(cat modalias)"
+ */
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int len;
+
+ len = of_device_modalias(dev, buf, PAGE_SIZE);
+ if (len != -ENODEV)
+ return len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ return sysfs_emit(buf, "platform:%s\n", pdev->name);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", dev_to_node(dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+
+ return len;
+}
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ char *driver_override, *old, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = pdev->driver_override;
+ if (strlen(driver_override)) {
+ pdev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ pdev->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *platform_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+
+ if (a == &dev_attr_numa_node.attr &&
+ dev_to_node(dev) == NUMA_NO_NODE)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute_group platform_dev_group = {
+ .attrs = platform_dev_attrs,
+ .is_visible = platform_dev_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(platform_dev);
+
+/**
+ * platform_match - bind platform device to platform driver.
+ * @dev: device.
+ * @drv: driver.
+ *
+ * Platform device IDs are assumed to be encoded like this:
+ * "<name><instance>", where <name> is a short description of the type of
+ * device, like "pci" or "floppy", and <instance> is the enumerated
+ * instance of the device, like '0' or '42'. Driver IDs are simply
+ * "<name>". So, extract the <name> from the platform_device structure,
+ * and compare it against the name of the driver. Return whether they match
+ * or not.
+ */
+static int platform_match(struct device *dev, struct device_driver *drv)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct platform_driver *pdrv = to_platform_driver(drv);
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (pdev->driver_override)
+ return !strcmp(pdev->driver_override, drv->name);
+
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ /* Then try ACPI style match */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
+ /* Then try to match against the id table */
+ if (pdrv->id_table)
+ return platform_match_id(pdrv->id_table, pdev) != NULL;
+
+ /* fall-back to driver name match */
+ return (strcmp(pdev->name, drv->name) == 0);
+}
+
+static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int rc;
+
+ /* Some devices have extra OF data and an OF-style MODALIAS */
+ rc = of_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
+ add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
+ pdev->name);
+ return 0;
+}
+
+static int platform_probe(struct device *_dev)
+{
+ struct platform_driver *drv = to_platform_driver(_dev->driver);
+ struct platform_device *dev = to_platform_device(_dev);
+ int ret;
+
+ /*
+ * A driver registered using platform_driver_probe() cannot be bound
+ * again later because the probe function usually lives in __init code
+ * and so is gone. For these drivers .probe is set to
+ * platform_probe_fail in __platform_driver_probe(). Don't even prepare
+ * clocks and PM domains for these to match the traditional behaviour.
+ */
+ if (unlikely(drv->probe == platform_probe_fail))
+ return -ENXIO;
+
+ ret = of_clk_set_defaults(_dev->of_node, false);
+ if (ret < 0)
+ return ret;
+
+ ret = dev_pm_domain_attach(_dev, true);
+ if (ret)
+ goto out;
+
+ if (drv->probe) {
+ ret = drv->probe(dev);
+ if (ret)
+ dev_pm_domain_detach(_dev, true);
+ }
+
+out:
+ if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
+ dev_warn(_dev, "probe deferral not supported\n");
+ ret = -ENXIO;
+ }
+
+ return ret;
+}
+
+static int platform_remove(struct device *_dev)
+{
+ struct platform_driver *drv = to_platform_driver(_dev->driver);
+ struct platform_device *dev = to_platform_device(_dev);
+ int ret = 0;
+
+ if (drv->remove)
+ ret = drv->remove(dev);
+ dev_pm_domain_detach(_dev, true);
+
+ return ret;
+}
+
+static void platform_shutdown(struct device *_dev)
+{
+ struct platform_device *dev = to_platform_device(_dev);
+ struct platform_driver *drv;
+
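+ /* The bus ->shutdown() callback is invoked even when no driver is bound. */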
+ if (!_dev->driver)
+ return;
+
+ drv = to_platform_driver(_dev->driver);
+ if (drv->shutdown)
+ drv->shutdown(dev);
+}
+
int platform_dma_configure(struct device *dev)
{
enum dev_dma_attr attr;
.dev_groups = platform_dev_groups,
.match = platform_match,
.uevent = platform_uevent,
+ .probe = platform_probe,
+ .remove = platform_remove,
+ .shutdown = platform_shutdown,
.dma_configure = platform_dma_configure,
.pm = &platform_dev_pm_ops,
};
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+ #define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
- #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
+ #define irq_count() (nmi_count() | hardirq_count() | softirq_count())
/*
- * Are we doing bottom half or hardware interrupt processing?
+ * Macros to retrieve the current execution context:
*
- * in_irq() - We're in (hard) IRQ context
+ * in_nmi() - We're in NMI context
+ * in_hardirq() - We're in hard IRQ context
+ * in_serving_softirq() - We're in softirq context
+ * in_task() - We're in task context
+ */
+ #define in_nmi() (nmi_count())
+ #define in_hardirq() (hardirq_count())
+ #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+ #define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
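+
+ /*
+ * Illustrative sketch only (not part of this change): callers can key
+ * non-sleeping behaviour off the current context, e.g.:
+ *
+ *	gfp_t gfp = in_task() ? GFP_KERNEL : GFP_ATOMIC;
+ */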
+
+ /*
+ * The following macros are deprecated and should not be used in new code:
+ * in_irq() - Obsolete version of in_hardirq()
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
- * in_serving_softirq() - We're in softirq context
- * in_nmi() - We're in NMI context
- * in_task() - We're in task context
- *
- * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
- * should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
- #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
- #define in_nmi() (preempt_count() & NMI_MASK)
- #define in_task() (!(preempt_count() & \
- (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
#endif
-/**
- * migrate_disable - Prevent migration of the current task
+#ifdef CONFIG_SMP
+
+/*
+ * Migrate-Disable and why it is undesired.
*
- * Maps to preempt_disable() which also disables preemption. Use
- * migrate_disable() to annotate that the intent is to prevent migration,
- * but not necessarily preemption.
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
+ * becomes one of the M highest priority tasks), it might still have to wait
+ * for the preemptee's migrate_disable() section to complete. It thereby
+ * suffers a reduction in bandwidth for the exact duration of the
+ * migrate_disable() section.
*
- * Can be invoked nested like preempt_disable() and needs the corresponding
- * number of migrate_enable() invocations.
- */
-static __always_inline void migrate_disable(void)
-{
- preempt_disable();
-}
-
-/**
- * migrate_enable - Allow migration of the current task
+ * Per this argument, the change from preempt_disable() to migrate_disable()
+ * gets us:
+ *
+ * - a higher priority task gains reduced wake-up latency; with preempt_disable()
+ * it would have had to wait for the lower priority task.
+ *
+ * - a lower priority task, which under preempt_disable() could've instantly
+ * migrated away when another CPU became available, is now constrained by the
+ * ability to push the higher priority task away, which might itself be in a
+ * migrate_disable() section, reducing its available bandwidth.
+ *
+ * IOW it trades latency / moves the interference term, but the interference
+ * stays in the system, and as long as it remains unbounded, the system is
+ * not fully deterministic.
+ *
+ *
+ * The reason we have it anyway.
*
- * Counterpart to migrate_disable().
+ * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
+ * number of primitives to become preemptible, it also allows them to migrate.
+ * This turns out to break a bunch of per-cpu usage. To this end, all these
+ * primitives employ migrate_disable() to restore this implicit assumption.
*
- * As migrate_disable() can be invoked nested, only the outermost invocation
- * reenables migration.
+ * This is a 'temporary' work-around at best. The correct solution is getting
+ * rid of the above assumptions and reworking the code to employ explicit
+ * per-cpu locking or short preempt-disable regions.
+ *
+ * The end goal must be to get rid of migrate_disable(); alternatively, we need
+ * a schedulability theory that does not depend on arbitrary migration.
+ *
+ *
+ * Notes on the implementation.
+ *
+ * The implementation is particularly tricky since existing code patterns
+ * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
+ * This means that it cannot use cpus_read_lock() to serialize against hotplug,
+ * nor can it easily migrate itself into a pending affinity mask change on
+ * migrate_enable().
+ *
+ *
+ * Note: even non-work-conserving schedulers like semi-partitioned ones depend
+ * on migration, so migrate_disable() is not only a problem for
+ * work-conserving schedulers.
*
- * Currently mapped to preempt_enable().
*/
-static __always_inline void migrate_enable(void)
-{
- preempt_enable();
-}
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+#else
+
+static inline void migrate_disable(void) { }
+static inline void migrate_enable(void) { }
+
+#endif /* CONFIG_SMP */
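+
+/*
+ * Illustrative sketch only (not part of this change): a preemptible section
+ * that must stay on its CPU while using per-CPU state; "my_pcpu_state" is
+ * hypothetical.
+ *
+ *	migrate_disable();
+ *	state = this_cpu_ptr(&my_pcpu_state);
+ *	... use state; may be preempted, but not migrated ...
+ *	migrate_enable();
+ */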
#endif /* __LINUX_PREEMPT_H */
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif
-const struct fwnode_operations irqchip_fwnode_ops;
+static const char *irqchip_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
+
+ return fwid->name;
+}
+
+const struct fwnode_operations irqchip_fwnode_ops = {
+ .get_name = irqchip_fwnode_get_name,
+};
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
/**
fwid->type = type;
fwid->name = n;
fwid->pa = pa;
- fwid->fwnode.ops = &irqchip_fwnode_ops;
+ fwnode_init(&fwid->fwnode, &irqchip_fwnode_ops);
return &fwid->fwnode;
}
EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
irq_hw_number_t first_hwirq,
const struct irq_domain_ops *ops,
void *host_data)
+ {
+ return irq_domain_create_legacy(of_node_to_fwnode(of_node), size,
+ first_irq, first_hwirq, ops, host_data);
+ }
+ EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
+
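+ /*
+ * irq_domain_create_legacy() - fwnode variant of irq_domain_add_legacy()
+ * above: maps hwirqs [first_hwirq, first_hwirq + size) onto linux IRQs
+ * starting at first_irq.
+ */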
+ struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
struct irq_domain *domain;
- domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
- first_hwirq + size, 0, ops, host_data);
+ domain = __irq_domain_add(fwnode, first_hwirq + size, first_hwirq + size,
+ 0, ops, host_data);
if (domain)
irq_domain_associate_many(domain, first_irq, first_hwirq, size);
return domain;
}
- EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
+ EXPORT_SYMBOL_GPL(irq_domain_create_legacy);
/**
* irq_find_matching_fwspec() - Locates a domain for a given fwspec
}
}
- void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
+ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
struct irq_data *irq_data = irq_get_irq_data(irq);
irq_hw_number_t hwirq;
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
- * irq_create_mapping() - Map a hardware interrupt into linux irq space
+ * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
* @domain: domain owning this hardware interrupt or NULL for default domain
* @hwirq: hardware irq number in that domain space
+ * @affinity: irq affinity
*
* Only one mapping per hardware interrupt is permitted. Returns a linux
* irq number.
* If the sense/trigger is to be specified, set_irq_type() should be called
* on the number returned from that call.
*/
-unsigned int irq_create_mapping(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity)
{
struct device_node *of_node;
int virq;
}
/* Allocate a virtual interrupt number */
- virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
+ virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+ affinity);
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
return virq;
}
-EXPORT_SYMBOL_GPL(irq_create_mapping);
+EXPORT_SYMBOL_GPL(irq_create_mapping_affinity);
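+
+/*
+ * Illustrative sketch only (not part of this change): callers without an
+ * affinity requirement pass a NULL descriptor; upstream keeps
+ * irq_create_mapping() as exactly such a wrapper:
+ *
+ *	virq = irq_create_mapping_affinity(domain, hwirq, NULL);
+ */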
/**
* irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
{
int i;
- fwspec->fwnode = np ? &np->fwnode : NULL;
+ fwspec->fwnode = of_node_to_fwnode(np);
fwspec->param_count = count;
for (i = 0; i < count; i++)
unsigned int irq_base,
unsigned int nr_irqs)
{
- if (domain->ops->free)
- domain->ops->free(domain, irq_base, nr_irqs);
+ unsigned int i;
+
+ if (!domain->ops->free)
+ return;
+
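+ /*
+ * A range may be sparsely mapped (e.g. after a partially failed
+ * allocation); only hand IRQs that still have irq_data in this domain
+ * to ->free().
+ */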
+ for (i = 0; i < nr_irqs; i++) {
+ if (irq_domain_get_irq_data(domain, irq_base + i))
+ domain->ops->free(domain, irq_base + i, 1);
+ }
}
int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,