// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 * Refer to: draft-ietf-forces-interfelfb-03 and the netdev01 paper
 * "Distributing Linux Traffic Control Classifier-Action Subsystem"
 * Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 * copyright Jamal Hadi Salim (2015)
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
static unsigned int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;
static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife) },
	[TCA_IFE_DMAC] = { .len = ETH_ALEN },
	[TCA_IFE_SMAC] = { .len = ETH_ALEN },
	[TCA_IFE_TYPE] = { .type = NLA_U16 },
};
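
/* Generic metadata helpers, exported for use by the IFE metadata modules:
 * encode/check/get/alloc/validate for simple u16 and u32 metadata values.
 */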
int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u16 edata = metaval;

	if (mi->metaval)
		edata = *(u16 *)mi->metaval;
	if (!edata) /* will not encode */
		return 0;

	edata = htons(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+V == 2+2+4 */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);
int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+(V) == 2+2+(2+2bytepad) */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u16);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u32 edata = metaval;

	if (mi->metaval)
		edata = *(u32 *)mi->metaval;
	if (!edata) /* will not encode */
		return 0;

	edata = htonl(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
	return mi->metaval ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
	return mi->metaval ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
void ife_release_meta_gen(struct tcf_meta_info *mi)
{
	kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);
int ife_validate_meta_u32(void *val, int len)
{
	if (len == sizeof(u32))
		return 0;
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
int ife_validate_meta_u16(void *val, int len)
{
	/* length will not include padding */
	if (len == sizeof(u16))
		return 0;
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);
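
/* Look up the metadata ops registered for @metaid. On success a reference
 * on the owning module is taken; the caller must drop it with module_put().
 */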
static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
	struct tcf_meta_ops *o;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		if (o->metaid == metaid) {
			if (!try_module_get(o->owner))
				o = NULL;
			read_unlock(&ife_mod_lock);
			return o;
		}
	}
	read_unlock(&ife_mod_lock);

	return NULL;
}
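
/* Register a metadata ops handler. All mandatory callbacks must be provided
 * and neither the metaid nor the name may already be registered.
 */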
int register_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;

	if (!mops->metaid || !mops->metatype || !mops->name ||
	    !mops->check_presence || !mops->encode || !mops->decode ||
	    !mops->get || !mops->alloc)
		return -EINVAL;

	write_lock(&ife_mod_lock);

	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid ||
		    (strcmp(mops->name, m->name) == 0)) {
			write_unlock(&ife_mod_lock);
			return -EEXIST;
		}
	}

	if (!mops->release)
		mops->release = ife_release_meta_gen;

	list_add_tail(&mops->list, &ifeoplist);
	write_unlock(&ife_mod_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(register_ife_op);
int unregister_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;
	int err = -ENOENT;

	write_lock(&ife_mod_lock);
	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid) {
			list_del(&mops->list);
			err = 0;
			break;
		}
	}
	write_unlock(&ife_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);
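
/* Vet a metadata value: prefer the handler's own ->validate() callback,
 * otherwise fall back to the generic u16/u32 length checks.
 */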
static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
	int ret = 0;

	/* XXX: unfortunately we can't use nla_policy at this point because a
	 * length of 0 is valid for "allow". "use" semantics do enforce a
	 * proper length, but nla_policy is awkward to use just for that.
	 */
	if (ops->validate)
		return ops->validate(val, len);

	if (ops->metatype == NLA_U32)
		ret = ife_validate_meta_u32(val, len);
	else if (ops->metatype == NLA_U16)
		ret = ife_validate_meta_u16(val, len);

	return ret;
}
#ifdef CONFIG_MODULES
static const char *ife_meta_id2name(u32 metaid)
{
	switch (metaid) {
	case IFE_META_SKBMARK:
		return "skbmark";
	case IFE_META_TCINDEX:
		return "tcindex";
	default:
		return "unknown";
	}
}
#endif
/* called when adding new meta information */
static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
{
	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret = -ENOENT;

#ifdef CONFIG_MODULES
	if (!ops) {
		/* drop RTNL while requesting the backing metadata module */
		if (rtnl_held)
			rtnl_unlock();
		request_module("ife-meta-%s", ife_meta_id2name(metaid));
		if (rtnl_held)
			rtnl_lock();
		ops = find_ife_oplist(metaid);
	}
#endif
	if (ops) {
		ret = ife_validate_metatype(ops, val, len);
		module_put(ops->owner);
	}

	return ret;
}
/* called when adding new meta information */
static int __add_metainfo(const struct tcf_meta_ops *ops,
			  struct tcf_ife_info *ife, u32 metaid, void *metaval,
			  int len, bool atomic, bool exists)
{
	struct tcf_meta_info *mi = NULL;
	int ret = 0;

	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!mi)
		return -ENOMEM;

	mi->metaid = metaid;
	mi->ops = ops;
	if (len > 0) {
		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
		if (ret != 0) {
			kfree(mi);
			return ret;
		}
	}

	if (exists)
		spin_lock_bh(&ife->tcf_lock);
	list_add_tail(&mi->metalist, &ife->metalist);
	if (exists)
		spin_unlock_bh(&ife->tcf_lock);

	return ret;
}
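
/* Used on the "use all metadata" path while ife_mod_lock is held: take a
 * module reference on @ops directly, then add an empty metadatum of that type.
 */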
static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
				    struct tcf_ife_info *ife, u32 metaid,
				    bool exists)
{
	int ret;

	if (!try_module_get(ops->owner))
		return -ENOENT;
	ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
	if (ret)
		module_put(ops->owner);
	return ret;
}
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
			int len, bool exists)
{
	const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret;

	if (!ops)
		return -ENOENT;
	ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
	if (ret)
		/* put back what find_ife_oplist took */
		module_put(ops->owner);
	return ret;
}
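
/* No explicit metadata list was passed: install every currently registered
 * metadatum. At least one must succeed, otherwise -EINVAL is returned.
 */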
static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
{
	struct tcf_meta_ops *o;
	int rc = 0;
	int installed = 0;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
		if (rc == 0)
			installed += 1;
	}
	read_unlock(&ife_mod_lock);

	return installed ? 0 : -EINVAL;
}
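
/* Dump the action's metadata list as a TCA_IFE_METALST nested attribute. */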
static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e;
	struct nlattr *nest;
	unsigned char *b = skb_tail_pointer(skb);
	int total_encoded = 0;

	/* can only happen on decode */
	if (list_empty(&ife->metalist))
		return 0;

	nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
	if (!nest)
		goto out_nlmsg_trim;

	list_for_each_entry(e, &ife->metalist, metalist) {
		if (!e->ops->get(skb, e))
			total_encoded += 1;
	}
	if (!total_encoded)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);
	return 0;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}
/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_meta_info *e, *n;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		list_del(&e->metalist);
		if (e->metaval) {
			if (e->ops->release)
				e->ops->release(e);
			else
				kfree(e->metaval);
		}
		module_put(e->ops->owner);
		kfree(e);
	}
}
static void tcf_ife_cleanup(struct tc_action *a)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;

	spin_lock_bh(&ife->tcf_lock);
	_tcf_ife_cleanup(a);
	spin_unlock_bh(&ife->tcf_lock);

	p = rcu_dereference_protected(ife->params, 1);
	if (p)
		kfree_rcu(p, rcu);
}
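
/* Walk the parsed IFE_META_* attributes, vet each metaid against its handler
 * (auto-loading it if needed) and add it to the action's metadata list.
 */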
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
			     bool exists, bool rtnl_held)
{
	void *val;
	int len, rc, i;

	for (i = 1; i < max_metacnt; i++) {
		if (!tb[i])
			continue;
		val = nla_data(tb[i]);
		len = nla_len(tb[i]);

		rc = load_metaops_and_vet(i, val, len, rtnl_held);
		if (rc)
			return rc;
		rc = add_metainfo(ife, i, val, len, exists);
		if (rc)
			return rc;
	}

	return 0;
}
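
/* Netlink ->init() callback: parse the TCA_IFE_* attributes, create or update
 * the action instance, set up the encode parameters and populate the metadata
 * list. Illustrative iproute2 usage (exact syntax may vary between versions):
 *
 *   tc filter add dev eth0 parent ffff: u32 match u32 0 0 \
 *	action ife encode allow mark dst 02:15:15:15:15:15
 */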
static int tcf_ife_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a,
			int ovr, int bind, bool rtnl_held,
			struct tcf_proto *tp, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);
	struct nlattr *tb[TCA_IFE_MAX + 1];
	struct nlattr *tb2[IFE_META_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_ife_params *p;
	struct tcf_ife_info *ife;
	u16 ife_type = ETH_P_IFE;
	struct tc_ife *parm;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	bool exists = false;
	int ret = 0;
	u32 index;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_IFE_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_IFE_PARMS]);

	/* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
	 * they cannot run at the same time. Reject all other flag values,
	 * which are not supported right now.
	 */
	if (parm->flags & ~IFE_ENCODE)
		return -EINVAL;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0) {
		kfree(p);
		return err;
	}
	exists = err;
	if (exists && bind) {
		kfree(p);
		return 0;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
				     bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			kfree(p);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		kfree(p);
		return -EEXIST;
	}

	ife = to_ife(*a);
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p->flags = parm->flags;

	if (parm->flags & IFE_ENCODE) {
		if (tb[TCA_IFE_TYPE])
			ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
		if (tb[TCA_IFE_DMAC])
			daddr = nla_data(tb[TCA_IFE_DMAC]);
		if (tb[TCA_IFE_SMAC])
			saddr = nla_data(tb[TCA_IFE_SMAC]);
	}

	if (parm->flags & IFE_ENCODE) {
		if (daddr)
			ether_addr_copy(p->eth_dst, daddr);
		else
			eth_zero_addr(p->eth_dst);
		if (saddr)
			ether_addr_copy(p->eth_src, saddr);
		else
			eth_zero_addr(p->eth_src);
		p->eth_type = ife_type;
	}

	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&ife->metalist);

	if (tb[TCA_IFE_METALST]) {
		err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
						  tb[TCA_IFE_METALST], NULL,
						  NULL);
		if (err)
			goto metadata_parse_err;
		err = populate_metalist(ife, tb2, exists, rtnl_held);
		if (err)
			goto metadata_parse_err;
	} else {
		/* if no metadata allow list was passed, or allow-all was
		 * requested, then add as many supported metadata as we can.
		 * At least one must be installable, else we bail out.
		 */
		err = use_all_metadata(ife, exists);
		if (err)
			goto metadata_parse_err;
	}

	if (exists)
		spin_lock_bh(&ife->tcf_lock);
	/* protected by tcf_lock when modifying existing action */
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(ife->params, p, 1);
	if (exists)
		spin_unlock_bh(&ife->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (p)
		kfree_rcu(p, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;

metadata_parse_err:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	kfree(p);
	tcf_idr_release(*a, bind);
	return err;
}
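
/* Netlink ->dump() callback: emit the action parameters, timestamps, ethernet
 * addresses/type and the metadata list under the action's tcf_lock.
 */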
static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;
	struct tc_ife opt = {
		.index = ife->tcf_index,
		.refcnt = refcount_read(&ife->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&ife->tcf_lock);
	opt.action = ife->tcf_action;
	p = rcu_dereference_protected(ife->params,
				      lockdep_is_held(&ife->tcf_lock));
	opt.flags = p->flags;

	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &ife->tcf_tm);
	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
		goto nla_put_failure;

	if (!is_zero_ether_addr(p->eth_dst)) {
		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
			goto nla_put_failure;
	}

	if (!is_zero_ether_addr(p->eth_src)) {
		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
			goto nla_put_failure;
	}

	if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
		goto nla_put_failure;

	if (dump_metalist(skb, ife)) {
		/* ignore failure to dump metalist */
		pr_info("Failed to dump metalist\n");
	}

	spin_unlock_bh(&ife->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ife->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
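
/* Match a received metaid against the action's metadata list and hand the
 * value to that entry's ->decode() op.
 */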
static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
			      u16 metaid, u16 mlen, void *mdata)
{
	struct tcf_meta_info *e;

	/* XXX: use hash to speed up */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (metaid == e->metaid) {
			if (e->ops) {
				/* We check for decode presence already */
				return e->ops->decode(skb, mdata, mlen);
			}
		}
	}

	return -ENOENT;
}
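
/* Decode path: pop the IFE header, walk its TLVs and decode each known
 * metadatum, then restore skb->protocol from the inner ethernet header.
 */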
static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	u8 *ifehdr_end;
	u8 *tlv_data;
	u16 metalen;

	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	tlv_data = ife_decode(skb, &metalen);
	if (unlikely(!tlv_data)) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	ifehdr_end = tlv_data + metalen;
	for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
		u8 *curr_data;
		u16 mtype;
		u16 dlen;

		curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
						&dlen, NULL);
		if (!curr_data) {
			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
			return TC_ACT_SHOT;
		}

		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
			/* abuse overlimits to count when we receive metadata
			 * but don't have an ops for it
			 */
			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
					    mtype, dlen);
			qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		}
	}

	if (WARN_ON(tlv_data != ifehdr_end)) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);
	skb_reset_network_header(skb);
	return action;
}
/* XXX: check if we can do this at install time instead of on the
 * current send data path
 */
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e, *n;
	int tot_run_sz = 0, run_sz = 0;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		if (e->ops->check_presence) {
			run_sz = e->ops->check_presence(skb, e);
			tot_run_sz += run_sz;
		}
	}

	return tot_run_sz;
}
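
/* Encode path: compute the total metadata length, push the IFE header,
 * encode each present metadatum and rewrite the outer ethernet header.
 */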
static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res, struct tcf_ife_params *p)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	struct ethhdr *oethh; /* outer ether header */
	struct tcf_meta_info *e;
	/* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
	 * where ORIGDATA = original ethernet header ...
	 */
	u16 metalen = ife_get_sz(skb, ife);
	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
	unsigned int skboff = 0;
	int new_len = skb->len + hdrm;
	bool exceed_mtu = false;
	void *ife_meta;
	int err = 0;

	if (!skb_at_tc_ingress(skb)) {
		if (new_len > skb->dev->mtu)
			exceed_mtu = true;
	}

	bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (!metalen) { /* no metadata to send */
		/* abuse overlimits to count when we allow a packet
		 * with no metadata (this is the hot data path)
		 */
		qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return action;
	}
	/* could be a bad policy setup or MTU config,
	 * so let's be conservative..
	 */
	if ((action == TC_ACT_SHOT) || exceed_mtu) {
		qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
		return TC_ACT_SHOT;
	}

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	ife_meta = ife_encode(skb, metalen);

	spin_lock(&ife->tcf_lock);

	/* XXX: we don't have a clever way of telling encode not to repeat
	 * some of the computations that are already done by
	 * ops->check_presence...
	 */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (e->ops->encode) {
			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
					     e);
		}
		if (err < 0) {
			/* too corrupt to keep around if overwritten */
			spin_unlock(&ife->tcf_lock);
			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
			return TC_ACT_SHOT;
		}
		skboff += err;
	}
	spin_unlock(&ife->tcf_lock);
	oethh = (struct ethhdr *)skb->data;

	if (!is_zero_ether_addr(p->eth_src))
		ether_addr_copy(oethh->h_source, p->eth_src);
	if (!is_zero_ether_addr(p->eth_dst))
		ether_addr_copy(oethh->h_dest, p->eth_dst);
	oethh->h_proto = htons(p->eth_type);

	if (skb_at_tc_ingress(skb))
		skb_pull(skb, skb->dev->hard_header_len);

	return action;
}
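
/* Main action entry point: dispatch to the encode or decode path according
 * to the parameters configured for this action instance.
 */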
static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_ife_params *p;
	int ret;

	p = rcu_dereference_bh(ife->params);
	if (p->flags & IFE_ENCODE) {
		ret = tcf_ife_encode(skb, a, res, p);
		return ret;
	}

	return tcf_ife_decode(skb, a, res);
}
static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tcf_idr_search(tn, a, index);
}
static struct tc_action_ops act_ife_ops = {
	.kind = "ife",
	.id = TCA_ID_IFE,
	.owner = THIS_MODULE,
	.act = tcf_ife_act,
	.dump = tcf_ife_dump,
	.cleanup = tcf_ife_cleanup,
	.init = tcf_ife_init,
	.walk = tcf_ife_walker,
	.lookup = tcf_ife_search,
	.size = sizeof(struct tcf_ife_info),
};
static __net_init int ife_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tc_action_net_init(net, tn, &act_ife_ops);
}
static void __net_exit ife_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ife_net_id);
}
static struct pernet_operations ife_net_ops = {
	.init = ife_init_net,
	.exit_batch = ife_exit_net,
	.id   = &ife_net_id,
	.size = sizeof(struct tc_action_net),
};
static int __init ife_init_module(void)
{
	return tcf_register_action(&act_ife_ops, &ife_net_ops);
}
static void __exit ife_cleanup_module(void)
{
	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}
module_init(ife_init_module);
module_exit(ife_cleanup_module);
MODULE_AUTHOR("Jamal Hadi Salim(2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");