#include <linux/pm.h>
#include <linux/log2.h>
+ #include <linux/soc/qcom/irq.h>
+
#include "../core.h"
#include "../pinconf.h"
#include "pinctrl-msm.h"
* @enabled_irqs: Bitmap of currently enabled irqs.
* @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
* detection.
+ * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
* @soc: Reference to soc_data of platform specific data.
* @regs: Base addresses for the TLMM tiles.
*/
DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
+ DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs[MAX_NR_TILES];
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_mask_parent(d);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_unmask_parent(d);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
static void msm_gpio_irq_enable(struct irq_data *d)
{
+ /*
+ * Clear the interrupt that may be pending before we enable
+ * the line.
+ * This is especially a problem with the GPIOs routed to the
+ * PDC. These GPIOs are direct-connect interrupts to the GIC.
+ * Disabling the interrupt line at the PDC does not prevent
+ * the interrupt from being latched at the GIC. The state at
+ * GIC needs to be cleared before enabling.
+ */
+ if (d->parent_data) {
+ irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+ irq_chip_enable_parent(d);
+ }
msm_gpio_irq_clear_unmask(d, true);
}
+ static void msm_gpio_irq_disable(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ if (d->parent_data)
+ irq_chip_disable_parent(d);
+
+ if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ msm_gpio_irq_mask(d);
+ }
+
static void msm_gpio_irq_unmask(struct irq_data *d)
{
msm_gpio_irq_clear_unmask(d, false);
unsigned long flags;
u32 val;
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
unsigned long flags;
u32 val;
+ if (d->parent_data)
+ irq_chip_set_type_parent(d, type);
+
+ if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+ return 0;
+
g = &pctrl->soc->groups[d->hwirq];
raw_spin_lock_irqsave(&pctrl->lock, flags);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
unsigned long flags;
+ /*
+ * While they may not wake up when the TLMM is powered off,
+ * some GPIOs would like to wake up the system from suspend
+ * when TLMM is powered on. To allow that, enable the GPIO
+ * summary line to be wakeup capable at GIC.
+ */
+ if (d->parent_data)
+ irq_chip_set_wake_parent(d, on);
+
raw_spin_lock_irqsave(&pctrl->lock, flags);
irq_set_irq_wake(pctrl->irq, on);
chained_irq_exit(chip, desc);
}
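For context, GPIO consumers need no changes to benefit from this; a minimal, hypothetical sketch of a client driver (names and the trigger type are illustrative, error handling omitted):

	/* The hierarchy set up above forwards enable/disable and wake
	 * configuration to the PDC parent transparently.
	 */
	struct gpio_desc *gpiod = devm_gpiod_get(dev, "example", GPIOD_IN);
	int irq = gpiod_to_irq(gpiod);

	if (!devm_request_irq(dev, irq, example_handler, IRQF_TRIGGER_FALLING,
			      "example", priv))
		enable_irq_wake(irq);	/* ends up in msm_gpio_irq_set_wake() */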
+ static int msm_gpio_wakeirq(struct gpio_chip *gc,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+ {
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_gpio_wakeirq_map *map;
+ int i;
+
+ *parent = GPIO_NO_WAKE_IRQ;
+ *parent_type = IRQ_TYPE_EDGE_RISING;
+
+ for (i = 0; i < pctrl->soc->nwakeirq_map; i++) {
+ map = &pctrl->soc->wakeirq_map[i];
+ if (map->gpio == child) {
+ *parent = map->wakeirq;
+ break;
+ }
+ }
+
+ return 0;
+ }
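For reference, the SoC data that msm_gpio_wakeirq() walks would carry a table along these lines; a hedged sketch with made-up GPIO/wakeirq pairs, not taken from any real SoC:

	/* Illustrative only: the GPIO-to-wakeirq pairs are SoC specific. */
	static const struct msm_gpio_wakeirq_map example_wakeirq_map[] = {
		{ .gpio = 22, .wakeirq = 36 },
		{ .gpio = 24, .wakeirq = 37 },
	};

	static const struct msm_pinctrl_soc_data example_pinctrl = {
		/* ... pins, functions, groups ... */
		.wakeirq_map	= example_wakeirq_map,
		.nwakeirq_map	= ARRAY_SIZE(example_wakeirq_map),
	};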
+
static bool msm_gpio_needs_valid_mask(struct msm_pinctrl *pctrl)
{
if (pctrl->soc->reserved_gpios)
{
struct gpio_chip *chip;
struct gpio_irq_chip *girq;
- int ret;
- unsigned ngpio = pctrl->soc->ngpios;
+ int i, ret;
+ unsigned gpio, ngpio = pctrl->soc->ngpios;
+ struct device_node *np;
+ bool skip;
if (WARN_ON(ngpio > MAX_NR_GPIO))
return -EINVAL;
pctrl->irq_chip.name = "msmgpio";
pctrl->irq_chip.irq_enable = msm_gpio_irq_enable;
+ pctrl->irq_chip.irq_disable = msm_gpio_irq_disable;
pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
+ pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
pctrl->irq_chip.irq_release_resources = msm_gpio_irq_relres;
+ np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
+ if (np) {
+ chip->irq.parent_domain = irq_find_matching_host(np,
+ DOMAIN_BUS_WAKEUP);
+ of_node_put(np);
+ if (!chip->irq.parent_domain)
+ return -EPROBE_DEFER;
+ chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
+
+ /*
+ * Let's skip handling the GPIOs if the parent irqchip is
+ * handling the direct-connect IRQ of the GPIO.
+ */
+ skip = irq_domain_qcom_handle_wakeup(chip->irq.parent_domain);
+ for (i = 0; skip && i < pctrl->soc->nwakeirq_map; i++) {
+ gpio = pctrl->soc->wakeirq_map[i].gpio;
+ set_bit(gpio, pctrl->skip_wake_irqs);
+ }
+ }
+
girq = &chip->irq;
girq->chip = &pctrl->irq_chip;
girq->parent_handler = msm_gpio_irq_handler;
+ girq->fwnode = pctrl->dev->fwnode;
girq->num_parents = 1;
girq->parents = devm_kcalloc(pctrl->dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
return PTR_ERR(pctrl->regs[i]);
}
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->regs[0] = devm_ioremap_resource(&pdev->dev, res);
+ pctrl->regs[0] = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pctrl->regs[0]))
return PTR_ERR(pctrl->regs[0]);
}
};
#endif
-BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
+BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
{
- int ret;
+ int ret = probe_user_read(dst, unsafe_ptr, size);
- ret = security_locked_down(LOCKDOWN_BPF_READ);
- if (ret < 0)
- goto out;
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_user_proto = {
+ .func = bpf_probe_read_user,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size,
+ const void __user *, unsafe_ptr)
+{
+ int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size);
- ret = probe_kernel_read(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_user_str_proto = {
+ .func = bpf_probe_read_user_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
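On the BPF program side, reading user memory would now look roughly like this; a hypothetical kprobe fragment (assumes the usual bpf_helpers.h declarations, 'user_ptr' is an illustrative __user pointer taken from the probed context):

	char buf[64] = {};
	long err = bpf_probe_read_user(buf, sizeof(buf), user_ptr);

	if (err < 0)
		return 0;	/* the helper has already zeroed buf on failure */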
+
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr,
+ const bool compat)
+{
+ int ret = security_locked_down(LOCKDOWN_BPF_READ);
+
+ if (unlikely(ret < 0))
+ goto out;
+ ret = compat ? probe_kernel_read(dst, unsafe_ptr, size) :
+ probe_kernel_read_strict(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
out:
memset(dst, 0, size);
+ return ret;
+}
+
+BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false);
+}
+
+static const struct bpf_func_proto bpf_probe_read_kernel_proto = {
+ .func = bpf_probe_read_kernel,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_proto = {
+ .func = bpf_probe_read_compat,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static __always_inline int
+bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr,
+ const bool compat)
+{
+ int ret = security_locked_down(LOCKDOWN_BPF_READ);
+
+ if (unlikely(ret < 0))
+ goto out;
+ /*
+ * The strncpy_from_unsafe_*() call will likely not fill the entire
+ * buffer, but that's okay in this circumstance as we're probing
+ * arbitrary memory anyway similar to bpf_probe_read_*() and might
+ * as well probe the stack. Thus, memory is explicitly cleared
+ * only in error case, so that improper users ignoring return
+ * code altogether don't copy garbage; otherwise length of string
+ * is returned that can be used for bpf_perf_event_output() et al.
+ */
+ ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) :
+ strncpy_from_unsafe_strict(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+out:
+ memset(dst, 0, size);
return ret;
}
-static const struct bpf_func_proto bpf_probe_read_proto = {
- .func = bpf_probe_read,
+BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false);
+}
+
+static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = {
+ .func = bpf_probe_read_kernel_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
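As the comment in bpf_probe_read_kernel_str_common() notes, the returned string length can feed bpf_perf_event_output() directly; a hypothetical fragment where 'ctx', 'events' and 'name_ptr' are illustrative:

	char name[32];
	long len = bpf_probe_read_kernel_str(name, sizeof(name), name_ptr);

	if (len > 0)
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, name, len);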
+
+BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true);
+}
+
+static const struct bpf_func_proto bpf_probe_read_compat_str_proto = {
+ .func = bpf_probe_read_compat_str,
.gpl_only = true,
.ret_type = RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
-BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
+BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
u32, size)
{
/*
return -EPERM;
if (unlikely(!nmi_uaccess_okay()))
return -EPERM;
- if (!access_ok(unsafe_ptr, size))
- return -EPERM;
- return probe_kernel_write(unsafe_ptr, src, size);
+ return probe_user_write(unsafe_ptr, src, size);
}
static const struct bpf_func_proto bpf_probe_write_user_proto = {
.arg2_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
- const void *, unsafe_ptr)
-{
- int ret;
-
- ret = security_locked_down(LOCKDOWN_BPF_READ);
- if (ret < 0)
- goto out;
-
- /*
- * The strncpy_from_unsafe() call will likely not fill the entire
- * buffer, but that's okay in this circumstance as we're probing
- * arbitrary memory anyway similar to bpf_probe_read() and might
- * as well probe the stack. Thus, memory is explicitly cleared
- * only in error case, so that improper users ignoring return
- * code altogether don't copy garbage; otherwise length of string
- * is returned that can be used for bpf_perf_event_output() et al.
- */
- ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
- if (unlikely(ret < 0))
-out:
- memset(dst, 0, size);
-
- return ret;
-}
-
-static const struct bpf_func_proto bpf_probe_read_str_proto = {
- .func = bpf_probe_read_str,
- .gpl_only = true,
- .ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_UNINIT_MEM,
- .arg2_type = ARG_CONST_SIZE_OR_ZERO,
- .arg3_type = ARG_ANYTHING,
-};
-
struct send_signal_irq_work {
struct irq_work irq_work;
struct task_struct *task;
return -EINVAL;
work = this_cpu_ptr(&send_signal_work);
- if (work->irq_work.flags & IRQ_WORK_BUSY)
+ if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
return -EBUSY;
/* Add the current task, which is the target of sending signal,
return &bpf_map_pop_elem_proto;
case BPF_FUNC_map_peek_elem:
return &bpf_map_peek_elem_proto;
- case BPF_FUNC_probe_read:
- return &bpf_probe_read_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_tail_call:
return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_probe_read_user:
+ return &bpf_probe_read_user_proto;
+ case BPF_FUNC_probe_read_kernel:
+ return &bpf_probe_read_kernel_proto;
+ case BPF_FUNC_probe_read:
+ return &bpf_probe_read_compat_proto;
+ case BPF_FUNC_probe_read_user_str:
+ return &bpf_probe_read_user_str_proto;
+ case BPF_FUNC_probe_read_kernel_str:
+ return &bpf_probe_read_kernel_str_proto;
case BPF_FUNC_probe_read_str:
- return &bpf_probe_read_str_proto;
+ return &bpf_probe_read_compat_str_proto;
#ifdef CONFIG_CGROUPS
case BPF_FUNC_get_current_cgroup_id:
return &bpf_get_current_cgroup_id_proto;
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
+extern const struct bpf_func_proto bpf_skb_output_proto;
+
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
struct bpf_map *, map, u64, flags)
{
}
}
+static const struct bpf_func_proto *
+tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+#ifdef CONFIG_NET
+ case BPF_FUNC_skb_output:
+ return &bpf_skb_output_proto;
+#endif
+ default:
+ return raw_tp_prog_func_proto(func_id, prog);
+ }
+}
+
static bool raw_tp_prog_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
- /* largest tracepoint in the kernel has 12 args */
- if (off < 0 || off >= sizeof(__u64) * 12)
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
return false;
if (type != BPF_READ)
return false;
return true;
}
+static bool tracing_prog_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return btf_ctx_access(off, size, type, prog, info);
+}
+
const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
.get_func_proto = raw_tp_prog_func_proto,
.is_valid_access = raw_tp_prog_is_valid_access,
const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};
+const struct bpf_verifier_ops tracing_verifier_ops = {
+ .get_func_proto = tracing_prog_func_proto,
+ .is_valid_access = tracing_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracing_prog_ops = {
+};
+
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,