/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)
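
/* Per-tcf_proto state: the list of attached programs and the IDR that
 * tracks filter handles. Each attached filter is a cls_bpf_prog carrying
 * either a classic BPF program (bpf_ops set) or an eBPF program obtained
 * from a file descriptor (bpf_ops NULL, optional bpf_name for dumps).
 */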
struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
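
/* Map the return code of a direct-action (exts_integrated) program to a TC
 * verdict; unrecognized codes fall back to TC_ACT_UNSPEC so classification
 * continues with the next program on the list.
 */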
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
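
/* Run each attached program against the skb in list order. At ingress the
 * MAC header is pushed back on first so the program also sees the link
 * layer header. In direct-action mode the program's return code is the
 * verdict; otherwise its return value selects the classid and the attached
 * actions (exts) are executed afterwards.
 */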
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}
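
/* Issue one TC_SETUP_CLSBPF request to the block's offload callbacks.
 * @prog and @oldprog select the operation: add (oldprog == NULL), replace
 * (both set) or destroy (prog == NULL). If installing a new program fails,
 * the call is replayed with the arguments swapped to restore the old one,
 * and a skip_sw program that did not make it into hardware is rejected.
 */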
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
				   extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog)
		tcf_block_offload_dec(block, &oldprog->gen_flags);

	err = tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
			return err;
		} else if (err > 0) {
			prog->in_hw_count = err;
			tcf_block_offload_inc(block, &prog->gen_flags);
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}
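
/* Final teardown of a filter. When the exts hold a netns reference, freeing
 * is deferred through tcf_queue_work() so it happens after an RCU grace
 * period; the worker retakes RTNL before destroying the actions.
 */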
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}
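
/* Classic BPF path: copy the sock_filter array from TCA_BPF_OPS, check its
 * size against TCA_BPF_OPS_LEN and build a bpf_prog from it with
 * bpf_prog_create().
 */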
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}
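
/* eBPF path: grab a reference to an already loaded BPF_PROG_TYPE_SCHED_CLS
 * program via the file descriptor in TCA_BPF_FD and remember an optional
 * user-supplied name for dumps. If the program needs skb dst entries, the
 * block is told to keep them.
 */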
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}
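
/* Parse the attributes shared by create and replace: exactly one of classic
 * opcodes or an eBPF fd must be given; actions are validated, and the
 * optional flag attributes select direct-action mode and the skip_hw /
 * skip_sw offload flags before the program itself is set up.
 */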
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}
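
/* Create or replace a filter. A handle is allocated from the IDR when none
 * was supplied; on replace the new program takes over the old one's handle
 * and list position, and the old program is released through the deferred
 * delete path.
 */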
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
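
/* Fill the netlink dump for one filter: classid, the program (raw opcodes
 * for classic BPF, or id/tag/name for eBPF), the extended actions and the
 * flag attributes, refreshing hardware stats first.
 */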
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}
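
/* Replay every program that is not marked skip_hw to a callback being bound
 * (add) or unbound (!add), keeping the per-program in_hw counters in sync
 * with the hardware state.
 */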
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv);
		if (err) {
			if (add && tc_skip_sw(prog->gen_flags))
				return err;
			continue;
		}

		tc_cls_offload_cnt_update(block, &prog->in_hw_count,
					  &prog->gen_flags, add);
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);