// SPDX-License-Identifier: GPL-2.0-only

/*
 * HID-BPF support for Linux
 *
 * Copyright (c) 2024 Benjamin Tissoires
 */

#include <linux/bitops.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"

static struct btf *hid_bpf_ops_btf;

static int hid_bpf_ops_init(struct btf *btf)
{
	hid_bpf_ops_btf = btf;
	return 0;
}

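/*
 * Context accesses from hid_bpf_ops programs are plain BTF-typed tracing
 * accesses, so validation is delegated to the generic tracing helper.
 */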
static bool hid_bpf_ops_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

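/*
 * hid_device_event runs from IRQ context, so only the operations listed
 * below may be implemented as sleepable programs; any other sleepable
 * member is rejected at load time.
 */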
static int hid_bpf_ops_check_member(const struct btf_type *t,
				    const struct btf_member *member,
				    const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_rdesc_fixup):
	case offsetof(struct hid_bpf_ops, hid_hw_request):
	case offsetof(struct hid_bpf_ops, hid_hw_output_report):
		break;
	default:
		if (prog->sleepable)
			return -EINVAL;
	}

	return 0;
}

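/*
 * Describes one field of a kernel struct that a hid_bpf_ops program is
 * allowed to write to. For string fields, the last byte is excluded from
 * the writeable range so that the terminating NUL cannot be overwritten.
 */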
struct hid_bpf_offset_write_range {
	const char *struct_name;
	u32 struct_length;
	u32 start;
	u32 end;
};

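/*
 * Verifier callback gating direct writes from hid_bpf_ops programs:
 * only hid_bpf_ctx->retval and the name/uniq/phys strings of the
 * underlying hid_device may be written to; everything else stays
 * read-only.
 */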
static int hid_bpf_ops_btf_struct_access(struct bpf_verifier_log *log,
					 const struct bpf_reg_state *reg,
					 int off, int size)
{
#define WRITE_RANGE(_name, _field, _is_string)					\
	{									\
		.struct_name = #_name,						\
		.struct_length = sizeof(struct _name),				\
		.start = offsetof(struct _name, _field),			\
		.end = offsetofend(struct _name, _field) - !!(_is_string),	\
	}

	const struct hid_bpf_offset_write_range write_ranges[] = {
		WRITE_RANGE(hid_bpf_ctx, retval, false),
		WRITE_RANGE(hid_device, name, true),
		WRITE_RANGE(hid_device, uniq, true),
		WRITE_RANGE(hid_device, phys, true),
	};
#undef WRITE_RANGE
	const struct btf_type *state = NULL;
	const struct btf_type *t;
	const char *cur = NULL;
	int i;

	t = btf_type_by_id(reg->btf, reg->btf_id);

	for (i = 0; i < ARRAY_SIZE(write_ranges); i++) {
		const struct hid_bpf_offset_write_range *write_range = &write_ranges[i];
		s32 type_id;

		/* we already found a writeable struct, but there is a
		 * new one, let's break the loop.
		 */
		if (t == state && write_range->struct_name != cur)
			break;

		/* new struct to look for */
		if (write_range->struct_name != cur) {
			type_id = btf_find_by_name_kind(reg->btf, write_range->struct_name,
							BTF_KIND_STRUCT);
			if (type_id < 0)
				return -EINVAL;

			state = btf_type_by_id(reg->btf, type_id);
		}

		/* this is not the struct we are looking for */
		if (t != state) {
			cur = write_range->struct_name;
			continue;
		}

		/* first time we see this struct, check for out of bounds */
		if (cur != write_range->struct_name &&
		    off + size > write_range->struct_length) {
			bpf_log(log, "write access for struct %s at off %d with size %d\n",
				write_range->struct_name, off, size);
			return -EACCES;
		}

		/* now check if we are in our boundaries */
		if (off >= write_range->start && off + size <= write_range->end)
			return NOT_INIT;

		cur = write_range->struct_name;
	}

134 bpf_log(log, "write access to this struct is not supported\n");
137 "write access at off %d with size %d on read-only part of %s\n",
static const struct bpf_verifier_ops hid_bpf_verifier_ops = {
	.get_func_proto = bpf_base_func_proto,
	.is_valid_access = hid_bpf_ops_is_valid_access,
	.btf_struct_access = hid_bpf_ops_btf_struct_access,
};

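/*
 * Copy the scalar configuration members (hid_id, flags) from the
 * userspace view of the struct_ops map into the kernel copy. Returning 1
 * tells the verifier the member has been consumed here.
 */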
static int hid_bpf_ops_init_member(const struct btf_type *t,
				   const struct btf_member *member,
				   void *kdata, const void *udata)
{
	const struct hid_bpf_ops *uhid_bpf_ops;
	struct hid_bpf_ops *khid_bpf_ops;
	u32 moff;

	uhid_bpf_ops = (const struct hid_bpf_ops *)udata;
	khid_bpf_ops = (struct hid_bpf_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct hid_bpf_ops, hid_id):
		/* For hid_id and flags fields, this function has to copy it
		 * and return 1 to indicate that the data has been handled by
		 * the struct_ops type, or the verifier will reject the map if
		 * the value of those fields is not zero.
		 */
		khid_bpf_ops->hid_id = uhid_bpf_ops->hid_id;
		return 1;
	case offsetof(struct hid_bpf_ops, flags):
		if (uhid_bpf_ops->flags & ~BPF_F_BEFORE)
			return -EINVAL;
		khid_bpf_ops->flags = uhid_bpf_ops->flags;
		return 1;
	}
	return 0;
}

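/*
 * Attach a hid_bpf_ops struct_ops map to the HID device identified by
 * ops->hid_id: take a reference on the device, enforce the per-device
 * program limit and the single-rdesc-fixup rule, then insert the ops
 * into the device's SRCU-protected program list.
 */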
static int hid_bpf_reg(void *kdata, struct bpf_link *link)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	int count, err = 0;

	/* prevent multiple attach of the same struct_ops */
	if (ops->hdev)
		return -EINVAL;

	hdev = hid_get_device(ops->hid_id);
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	ops->hdev = hdev;

	mutex_lock(&hdev->bpf.prog_list_lock);

	count = list_count_nodes(&hdev->bpf.prog_list);
	if (count >= HID_BPF_MAX_PROGS_PER_DEV) {
		err = -E2BIG;
		goto out_unlock;
	}

	if (ops->hid_rdesc_fixup) {
		if (hdev->bpf.rdesc_ops) {
			err = -EINVAL;
			goto out_unlock;
		}

		hdev->bpf.rdesc_ops = ops;
	}

	if (ops->hid_device_event) {
		err = hid_bpf_allocate_event_data(hdev);
		if (err)
			goto out_unlock;
	}

	if (ops->flags & BPF_F_BEFORE)
		list_add_rcu(&ops->list, &hdev->bpf.prog_list);
	else
		list_add_tail_rcu(&ops->list, &hdev->bpf.prog_list);
	synchronize_srcu(&hdev->bpf.srcu);

out_unlock:
	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (err) {
		if (hdev->bpf.rdesc_ops == ops)
			hdev->bpf.rdesc_ops = NULL;
		hid_put_device(hdev);
	} else if (ops->hid_rdesc_fixup) {
		hid_bpf_reconnect(hdev);
	}

	return err;
}

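/*
 * Detach: remove the ops from the program list, wait for in-flight SRCU
 * readers, and trigger a reconnect if a report-descriptor fixup was
 * removed so that the unmodified descriptor gets re-parsed.
 */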
static void hid_bpf_unreg(void *kdata, struct bpf_link *link)
{
	struct hid_bpf_ops *ops = kdata;
	struct hid_device *hdev;
	bool reconnect = false;

	hdev = ops->hdev;

	/* check if __hid_bpf_ops_destroy_device() has been called */
	if (!hdev)
		return;

	mutex_lock(&hdev->bpf.prog_list_lock);

	list_del_rcu(&ops->list);
	synchronize_srcu(&hdev->bpf.srcu);

	reconnect = hdev->bpf.rdesc_ops == ops;
	if (reconnect)
		hdev->bpf.rdesc_ops = NULL;

	mutex_unlock(&hdev->bpf.prog_list_lock);

	if (reconnect)
		hid_bpf_reconnect(hdev);

	hid_put_device(hdev);
}

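/*
 * Default no-op implementations. They are never called directly but are
 * required as CFI stubs so that indirect calls through a partially
 * filled hid_bpf_ops stay compatible with kCFI.
 */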
static int __hid_bpf_device_event(struct hid_bpf_ctx *ctx, enum hid_report_type type, u64 source)
{
	return 0;
}

static int __hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	return 0;
}

static int __hid_bpf_hw_request(struct hid_bpf_ctx *ctx, unsigned char reportnum,
				enum hid_report_type rtype, enum hid_class_request reqtype,
				u64 source)
{
	return 0;
}

static int __hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, u64 source)
{
	return 0;
}

static struct hid_bpf_ops __bpf_hid_bpf_ops = {
	.hid_device_event = __hid_bpf_device_event,
	.hid_rdesc_fixup = __hid_bpf_rdesc_fixup,
	.hid_hw_request = __hid_bpf_hw_request,
	.hid_hw_output_report = __hid_bpf_hw_output_report,
};

static struct bpf_struct_ops bpf_hid_bpf_ops = {
	.verifier_ops = &hid_bpf_verifier_ops,
	.init = hid_bpf_ops_init,
	.check_member = hid_bpf_ops_check_member,
	.init_member = hid_bpf_ops_init_member,
	.reg = hid_bpf_reg,
	.unreg = hid_bpf_unreg,
	.name = "hid_bpf_ops",
	.cfi_stubs = &__bpf_hid_bpf_ops,
	.owner = THIS_MODULE,
};

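/*
 * Called when the HID device goes away while struct_ops maps are still
 * attached: drop the device reference held by each attached ops and
 * clear its back-pointer so that a later unreg becomes a no-op.
 */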
void __hid_bpf_ops_destroy_device(struct hid_device *hdev)
{
	struct hid_bpf_ops *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &hdev->bpf.prog_list, list) {
		hid_put_device(hdev);
		e->hdev = NULL;
	}
	rcu_read_unlock();
}

static int __init hid_bpf_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_hid_bpf_ops, hid_bpf_ops);
}
late_initcall(hid_bpf_struct_ops_init);
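
/*
 * For illustration only, not part of the original file: a minimal sketch
 * of a BPF-side user of this struct_ops, modelled on the in-tree HID-BPF
 * examples. All names and the hid_id value are hypothetical; real loaders
 * patch hid_id at runtime before attaching the map.
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	char _license[] SEC("license") = "GPL";
 *
 *	SEC("struct_ops/hid_device_event")
 *	int BPF_PROG(my_event_hook, struct hid_bpf_ctx *hctx,
 *		     enum hid_report_type type, u64 source)
 *	{
 *		return 0;	// leave the report untouched
 *	}
 *
 *	SEC(".struct_ops.link")
 *	struct hid_bpf_ops my_ops = {
 *		.hid_id = 42,	// hypothetical; patched by the loader
 *		.hid_device_event = (void *)my_event_hook,
 *	};
 */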