// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  SR-IPv6 implementation
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#include <net/ip6_fib.h>
#include <net/route.h>
#include <net/seg6.h>
#include <linux/seg6.h>
#include <linux/seg6_local.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/dst_cache.h>
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif
#include <net/seg6_local.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>
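
/* Each seg6local route is described by a seg6_action_desc (the SRv6
 * behavior, the netlink attributes it requires and any static headroom)
 * and by a seg6_local_lwt holding the parsed attribute values.
 */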
struct seg6_local_lwt;

struct seg6_action_desc {
	int action;
	unsigned long attrs;
	int (*input)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
	int static_headroom;
};

struct bpf_lwt_prog {
	struct bpf_prog *prog;
	char *name;
};

struct seg6_local_lwt {
	int action;
	struct ipv6_sr_hdr *srh;
	int table;
	struct in_addr nh4;
	struct in6_addr nh6;
	int iif;
	int oif;
	struct bpf_lwt_prog bpf;

	int headroom;
	struct seg6_action_desc *desc;
};

static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct seg6_local_lwt *)lwt->data;
}

static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
{
	struct ipv6_sr_hdr *srh;
	int len, srhoff = 0;

	if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
		return NULL;

	if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
		return NULL;

	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
	len = (srh->hdrlen + 1) << 3;

	if (!pskb_may_pull(skb, srhoff + len))
		return NULL;

	/* note that pskb_may_pull may change pointers in header;
	 * for this reason it is necessary to reload them when needed.
	 */
	srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);

	if (!seg6_validate_srh(srh, len))
		return NULL;

	return srh;
}

static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
{
	struct ipv6_sr_hdr *srh;

	srh = get_srh(skb);
	if (!srh)
		return NULL;

	if (srh->segments_left == 0)
		return NULL;

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (!seg6_hmac_validate_skb(skb))
		return NULL;
#endif

	return srh;
}
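
/* Accept only packets whose SRH, if present, has segments_left == 0 (and a
 * valid HMAC when CONFIG_IPV6_SEG6_HMAC is enabled), then strip the outer
 * IPv6 header and any extension headers preceding the inner header of type
 * @proto.
 */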
static bool decap_and_validate(struct sk_buff *skb, int proto)
{
	struct ipv6_sr_hdr *srh;
	unsigned int off = 0;

	srh = get_srh(skb);
	if (srh && srh->segments_left > 0)
		return false;

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (srh && !seg6_hmac_validate_skb(skb))
		return false;
#endif

	if (ipv6_find_hdr(skb, &off, proto, NULL, NULL) < 0)
		return false;

	if (!pskb_pull(skb, off))
		return false;

	skb_postpull_rcsum(skb, skb_network_header(skb), off);

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->encapsulation = 0;

	return true;
}

static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr)
{
	struct in6_addr *addr;

	srh->segments_left--;
	addr = srh->segments + srh->segments_left;
	*daddr = *addr;
}
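
/* Route the packet toward @nhaddr (or, if NULL, toward the current IPv6
 * destination address) using table @tbl_id. When @local_delivery is false,
 * routes that would deliver the packet locally are rejected and the packet
 * is sent to the blackhole entry instead.
 */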
static int
seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
			u32 tbl_id, bool local_delivery)
{
	struct net *net = dev_net(skb->dev);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;
	struct flowi6 fl6;
	int dev_flags = 0;

	fl6.flowi6_iif = skb->dev->ifindex;
	fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
	fl6.saddr = hdr->saddr;
	fl6.flowlabel = ip6_flowinfo(hdr);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = hdr->nexthdr;

	if (nhaddr)
		fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;

	if (!tbl_id) {
		dst = ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags);
	} else {
		struct fib6_table *table;

		table = fib6_get_table(net, tbl_id);
		if (!table)
			goto out;

		rt = ip6_pol_route(net, table, 0, &fl6, skb, flags);
		dst = &rt->dst;
	}

	/* we want to discard traffic destined for local packet processing,
	 * if @local_delivery is set to false.
	 */
	if (!local_delivery)
		dev_flags |= IFF_LOOPBACK;

	if (dst && (dst->dev->flags & dev_flags) && !dst->error) {
		dst_release(dst);
		dst = NULL;
	}

out:
	if (!dst) {
		rt = net->ipv6.ip6_blk_hole_entry;
		dst = &rt->dst;
		dst_hold(dst);
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return dst->error;
}

int seg6_lookup_nexthop(struct sk_buff *skb,
			struct in6_addr *nhaddr, u32 tbl_id)
{
	return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false);
}

/* regular endpoint function */
static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	seg6_lookup_nexthop(skb, NULL, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

/* regular endpoint, and forward to specified nexthop */
static int input_action_end_x(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	seg6_lookup_nexthop(skb, &slwt->nh6, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}
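
/* regular endpoint, with a route lookup for the updated DA in the IPv6
 * table configured through the SEG6_LOCAL_TABLE attribute (End.T)
 */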
static int input_action_end_t(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	seg6_lookup_nexthop(skb, NULL, slwt->table);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

/* decapsulate and forward inner L2 frame on specified interface */
static int input_action_end_dx2(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	struct net *net = dev_net(skb->dev);
	struct net_device *odev;
	struct ethhdr *eth;

	if (!decap_and_validate(skb, NEXTHDR_NONE))
		goto drop;

	if (!pskb_may_pull(skb, ETH_HLEN))
		goto drop;

	skb_reset_mac_header(skb);
	eth = (struct ethhdr *)skb->data;

	/* To determine the frame's protocol, we assume it is 802.3. This avoids
	 * a call to eth_type_trans(), which is not really relevant for our
	 * use case.
	 */
	if (!eth_proto_is_802_3(eth->h_proto))
		goto drop;

	odev = dev_get_by_index_rcu(net, slwt->oif);
	if (!odev)
		goto drop;

	/* As we accept Ethernet frames, make sure the egress device is of
	 * the correct type.
	 */
	if (odev->type != ARPHRD_ETHER)
		goto drop;

	if (!(odev->flags & IFF_UP) || !netif_carrier_ok(odev))
		goto drop;

	skb_orphan(skb);

	if (skb_warn_if_lro(skb))
		goto drop;

	skb_forward_csum(skb);

	if (skb->len - ETH_HLEN > odev->mtu)
		goto drop;

	skb->dev = odev;
	skb->protocol = eth->h_proto;

	return dev_queue_xmit(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

/* decapsulate and forward to specified nexthop */
static int input_action_end_dx6(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	struct in6_addr *nhaddr = NULL;

	/* this function accepts IPv6 encapsulated packets, with either
	 * an SRH with SL=0, or no SRH.
	 */

	if (!decap_and_validate(skb, IPPROTO_IPV6))
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	/* The inner packet is not associated to any local interface,
	 * so we do not call netif_rx().
	 *
	 * If slwt->nh6 is set to ::, then lookup the nexthop for the
	 * inner packet's DA. Otherwise, use the specified nexthop.
	 */
	if (!ipv6_addr_any(&slwt->nh6))
		nhaddr = &slwt->nh6;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	seg6_lookup_nexthop(skb, nhaddr, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}
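
/* decapsulate and forward the inner IPv4 packet, either to the nexthop
 * configured through SEG6_LOCAL_NH4 or, if that address is 0.0.0.0, to the
 * inner packet's destination address (End.DX4)
 */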
static int input_action_end_dx4(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	struct iphdr *iph;
	__be32 nhaddr;
	int err;

	if (!decap_and_validate(skb, IPPROTO_IPIP))
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto drop;

	skb->protocol = htons(ETH_P_IP);

	iph = ip_hdr(skb);

	nhaddr = slwt->nh4.s_addr ?: iph->daddr;

	skb_dst_drop(skb);

	skb_set_transport_header(skb, sizeof(struct iphdr));

	err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
	if (err)
		goto drop;

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}
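
/* decapsulate and perform an IPv6 route lookup for the inner packet in the
 * table configured through SEG6_LOCAL_TABLE, allowing local delivery
 * (End.DT6)
 */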
static int input_action_end_dt6(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	if (!decap_and_validate(skb, IPPROTO_IPV6))
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto drop;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	seg6_lookup_any_nexthop(skb, NULL, slwt->table, true);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

/* push an SRH on top of the current one */
static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;
	int err = -EINVAL;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	err = seg6_do_srh_inline(skb, slwt->srh);
	if (err)
		goto drop;

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	seg6_lookup_nexthop(skb, NULL, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return err;
}

/* encapsulate within an outer IPv6 header and a specified SRH */
static int input_action_end_b6_encap(struct sk_buff *skb,
				     struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;
	int err = -EINVAL;

	srh = get_and_validate_srh(skb);
	if (!srh)
		goto drop;

	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	skb_reset_inner_headers(skb);
	skb->encapsulation = 1;

	err = seg6_do_srh_encap(skb, slwt->srh, IPPROTO_IPV6);
	if (err)
		goto drop;

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	seg6_lookup_nexthop(skb, NULL, 0);

	return dst_input(skb);

drop:
	kfree_skb(skb);
	return err;
}
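
/* Per-CPU SRH state shared with the bpf_lwt_seg6_* helpers: a BPF program
 * run by the End.BPF action may grow, shrink or rewrite the SRH, so its
 * validity is re-checked before the packet is forwarded.
 */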
DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);

bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	struct ipv6_sr_hdr *srh = srh_state->srh;

	if (unlikely(srh == NULL))
		return false;

	if (unlikely(!srh_state->valid)) {
		if ((srh_state->hdrlen & 7) != 0)
			return false;

		srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
		if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))
			return false;

		srh_state->valid = true;
	}

	return true;
}

static int input_action_end_bpf(struct sk_buff *skb,
				struct seg6_local_lwt *slwt)
{
	struct seg6_bpf_srh_state *srh_state =
		this_cpu_ptr(&seg6_bpf_srh_states);
	struct ipv6_sr_hdr *srh;
	int ret;

	srh = get_and_validate_srh(skb);
	if (!srh) {
		kfree_skb(skb);
		return -EINVAL;
	}
	advance_nextseg(srh, &ipv6_hdr(skb)->daddr);

	/* preempt_disable is needed to protect the per-CPU buffer srh_state,
	 * which is also accessed by the bpf_lwt_seg6_* helpers
	 */
	preempt_disable();
	srh_state->srh = srh;
	srh_state->hdrlen = srh->hdrlen << 3;
	srh_state->valid = true;

	rcu_read_lock();
	bpf_compute_data_pointers(skb);
	ret = bpf_prog_run_save_cb(slwt->bpf.prog, skb);
	rcu_read_unlock();

	switch (ret) {
	case BPF_OK:
	case BPF_REDIRECT:
		break;
	case BPF_DROP:
		goto drop;
	default:
		pr_warn_once("bpf-seg6local: Illegal return value %u\n", ret);
		goto drop;
	}

	if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
		goto drop;

	preempt_enable();
	if (ret != BPF_REDIRECT)
		seg6_lookup_nexthop(skb, NULL, 0);

	return dst_input(skb);

drop:
	preempt_enable();
	kfree_skb(skb);
	return -EINVAL;
}
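
/* Map each SRv6 behavior to its input handler and to the netlink attributes
 * it requires. As an illustrative iproute2 example (syntax may vary between
 * versions), "ip -6 route add 2001:db8::100 encap seg6local action End.DX6
 * nh6 2001:db8::1 dev eth0" selects the End.DX6 entry below and carries the
 * SEG6_LOCAL_NH6 attribute.
 */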
static struct seg6_action_desc seg6_action_table[] = {
	{
		.action = SEG6_LOCAL_ACTION_END,
		.attrs = 0,
		.input = input_action_end,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_X,
		.attrs = (1 << SEG6_LOCAL_NH6),
		.input = input_action_end_x,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_T,
		.attrs = (1 << SEG6_LOCAL_TABLE),
		.input = input_action_end_t,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_DX2,
		.attrs = (1 << SEG6_LOCAL_OIF),
		.input = input_action_end_dx2,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_DX6,
		.attrs = (1 << SEG6_LOCAL_NH6),
		.input = input_action_end_dx6,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_DX4,
		.attrs = (1 << SEG6_LOCAL_NH4),
		.input = input_action_end_dx4,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_DT6,
		.attrs = (1 << SEG6_LOCAL_TABLE),
		.input = input_action_end_dt6,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_B6,
		.attrs = (1 << SEG6_LOCAL_SRH),
		.input = input_action_end_b6,
	},
	{
		.action = SEG6_LOCAL_ACTION_END_B6_ENCAP,
		.attrs = (1 << SEG6_LOCAL_SRH),
		.input = input_action_end_b6_encap,
		.static_headroom = sizeof(struct ipv6hdr),
	},
	{
		.action = SEG6_LOCAL_ACTION_END_BPF,
		.attrs = (1 << SEG6_LOCAL_BPF),
		.input = input_action_end_bpf,
	},
};

static struct seg6_action_desc *__get_action_desc(int action)
{
	struct seg6_action_desc *desc;
	int i, count;

	count = ARRAY_SIZE(seg6_action_table);
	for (i = 0; i < count; i++) {
		desc = &seg6_action_table[i];
		if (desc->action == action)
			return desc;
	}

	return NULL;
}

static int seg6_local_input(struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct seg6_action_desc *desc;
	struct seg6_local_lwt *slwt;

	if (skb->protocol != htons(ETH_P_IPV6)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	slwt = seg6_local_lwtunnel(orig_dst->lwtstate);
	desc = slwt->desc;

	return desc->input(skb, slwt);
}

static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = {
	[SEG6_LOCAL_ACTION] = { .type = NLA_U32 },
	[SEG6_LOCAL_SRH] = { .type = NLA_BINARY },
	[SEG6_LOCAL_TABLE] = { .type = NLA_U32 },
	[SEG6_LOCAL_NH4] = { .type = NLA_BINARY,
			     .len = sizeof(struct in_addr) },
	[SEG6_LOCAL_NH6] = { .type = NLA_BINARY,
			     .len = sizeof(struct in6_addr) },
	[SEG6_LOCAL_IIF] = { .type = NLA_U32 },
	[SEG6_LOCAL_OIF] = { .type = NLA_U32 },
	[SEG6_LOCAL_BPF] = { .type = NLA_NESTED },
};
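
/* Each attribute has a parse/put/cmp triple; the per-attribute handlers
 * below are selected through the seg6_action_params[] table further down.
 */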
static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;
	int len;

	srh = nla_data(attrs[SEG6_LOCAL_SRH]);
	len = nla_len(attrs[SEG6_LOCAL_SRH]);

	/* SRH must contain at least one segment */
	if (len < sizeof(*srh) + sizeof(struct in6_addr))
		return -EINVAL;

	if (!seg6_validate_srh(srh, len))
		return -EINVAL;

	slwt->srh = kmemdup(srh, len, GFP_KERNEL);
	if (!slwt->srh)
		return -ENOMEM;

	slwt->headroom += len;

	return 0;
}

static int put_nla_srh(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct ipv6_sr_hdr *srh;
	struct nlattr *nla;
	int len;

	srh = slwt->srh;
	len = (srh->hdrlen + 1) << 3;

	nla = nla_reserve(skb, SEG6_LOCAL_SRH, len);
	if (!nla)
		return -EMSGSIZE;

	memcpy(nla_data(nla), srh, len);

	return 0;
}

static int cmp_nla_srh(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	int len = (a->srh->hdrlen + 1) << 3;

	if (len != ((b->srh->hdrlen + 1) << 3))
		return 1;

	return memcmp(a->srh, b->srh, len);
}

static int parse_nla_table(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	slwt->table = nla_get_u32(attrs[SEG6_LOCAL_TABLE]);

	return 0;
}

static int put_nla_table(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	if (nla_put_u32(skb, SEG6_LOCAL_TABLE, slwt->table))
		return -EMSGSIZE;

	return 0;
}

static int cmp_nla_table(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	if (a->table != b->table)
		return 1;

	return 0;
}

static int parse_nla_nh4(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	memcpy(&slwt->nh4, nla_data(attrs[SEG6_LOCAL_NH4]),
	       sizeof(struct in_addr));

	return 0;
}

static int put_nla_nh4(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct nlattr *nla;

	nla = nla_reserve(skb, SEG6_LOCAL_NH4, sizeof(struct in_addr));
	if (!nla)
		return -EMSGSIZE;

	memcpy(nla_data(nla), &slwt->nh4, sizeof(struct in_addr));

	return 0;
}

static int cmp_nla_nh4(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	return memcmp(&a->nh4, &b->nh4, sizeof(struct in_addr));
}

static int parse_nla_nh6(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	memcpy(&slwt->nh6, nla_data(attrs[SEG6_LOCAL_NH6]),
	       sizeof(struct in6_addr));

	return 0;
}

static int put_nla_nh6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct nlattr *nla;

	nla = nla_reserve(skb, SEG6_LOCAL_NH6, sizeof(struct in6_addr));
	if (!nla)
		return -EMSGSIZE;

	memcpy(nla_data(nla), &slwt->nh6, sizeof(struct in6_addr));

	return 0;
}

static int cmp_nla_nh6(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	return memcmp(&a->nh6, &b->nh6, sizeof(struct in6_addr));
}

static int parse_nla_iif(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	slwt->iif = nla_get_u32(attrs[SEG6_LOCAL_IIF]);

	return 0;
}

static int put_nla_iif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	if (nla_put_u32(skb, SEG6_LOCAL_IIF, slwt->iif))
		return -EMSGSIZE;

	return 0;
}

static int cmp_nla_iif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	if (a->iif != b->iif)
		return 1;

	return 0;
}

static int parse_nla_oif(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	slwt->oif = nla_get_u32(attrs[SEG6_LOCAL_OIF]);

	return 0;
}

static int put_nla_oif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	if (nla_put_u32(skb, SEG6_LOCAL_OIF, slwt->oif))
		return -EMSGSIZE;

	return 0;
}

static int cmp_nla_oif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	if (a->oif != b->oif)
		return 1;

	return 0;
}

#define MAX_PROG_NAME 256
static const struct nla_policy bpf_prog_policy[SEG6_LOCAL_BPF_PROG_MAX + 1] = {
	[SEG6_LOCAL_BPF_PROG] = { .type = NLA_U32, },
	[SEG6_LOCAL_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
				       .len = MAX_PROG_NAME },
};

static int parse_nla_bpf(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	struct nlattr *tb[SEG6_LOCAL_BPF_PROG_MAX + 1];
	struct bpf_prog *p;
	int ret;
	u32 fd;

	ret = nla_parse_nested_deprecated(tb, SEG6_LOCAL_BPF_PROG_MAX,
					  attrs[SEG6_LOCAL_BPF],
					  bpf_prog_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[SEG6_LOCAL_BPF_PROG] || !tb[SEG6_LOCAL_BPF_PROG_NAME])
		return -EINVAL;

	slwt->bpf.name = nla_memdup(tb[SEG6_LOCAL_BPF_PROG_NAME], GFP_KERNEL);
	if (!slwt->bpf.name)
		return -ENOMEM;

	fd = nla_get_u32(tb[SEG6_LOCAL_BPF_PROG]);
	p = bpf_prog_get_type(fd, BPF_PROG_TYPE_LWT_SEG6LOCAL);
	if (IS_ERR(p)) {
		kfree(slwt->bpf.name);
		return PTR_ERR(p);
	}

	slwt->bpf.prog = p;
	return 0;
}

static int put_nla_bpf(struct sk_buff *skb, struct seg6_local_lwt *slwt)
{
	struct nlattr *nest;

	if (!slwt->bpf.prog)
		return 0;

	nest = nla_nest_start_noflag(skb, SEG6_LOCAL_BPF);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, SEG6_LOCAL_BPF_PROG, slwt->bpf.prog->aux->id))
		return -EMSGSIZE;

	if (slwt->bpf.name &&
	    nla_put_string(skb, SEG6_LOCAL_BPF_PROG_NAME, slwt->bpf.name))
		return -EMSGSIZE;

	return nla_nest_end(skb, nest);
}

static int cmp_nla_bpf(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
{
	if (!a->bpf.name && !b->bpf.name)
		return 0;

	if (!a->bpf.name || !b->bpf.name)
		return 1;

	return strcmp(a->bpf.name, b->bpf.name);
}

struct seg6_action_param {
	int (*parse)(struct nlattr **attrs, struct seg6_local_lwt *slwt);
	int (*put)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
	int (*cmp)(struct seg6_local_lwt *a, struct seg6_local_lwt *b);
};

static struct seg6_action_param seg6_action_params[SEG6_LOCAL_MAX + 1] = {
	[SEG6_LOCAL_SRH] = { .parse = parse_nla_srh,
			     .put = put_nla_srh,
			     .cmp = cmp_nla_srh },

	[SEG6_LOCAL_TABLE] = { .parse = parse_nla_table,
			       .put = put_nla_table,
			       .cmp = cmp_nla_table },

	[SEG6_LOCAL_NH4] = { .parse = parse_nla_nh4,
			     .put = put_nla_nh4,
			     .cmp = cmp_nla_nh4 },

	[SEG6_LOCAL_NH6] = { .parse = parse_nla_nh6,
			     .put = put_nla_nh6,
			     .cmp = cmp_nla_nh6 },

	[SEG6_LOCAL_IIF] = { .parse = parse_nla_iif,
			     .put = put_nla_iif,
			     .cmp = cmp_nla_iif },

	[SEG6_LOCAL_OIF] = { .parse = parse_nla_oif,
			     .put = put_nla_oif,
			     .cmp = cmp_nla_oif },

	[SEG6_LOCAL_BPF] = { .parse = parse_nla_bpf,
			     .put = put_nla_bpf,
			     .cmp = cmp_nla_bpf },
};
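
/* Resolve the descriptor for the requested action and parse every netlink
 * attribute that the action requires.
 */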
static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt)
{
	struct seg6_action_param *param;
	struct seg6_action_desc *desc;
	int i, err;

	desc = __get_action_desc(slwt->action);
	if (!desc)
		return -EINVAL;

	if (!desc->input)
		return -EOPNOTSUPP;

	slwt->desc = desc;
	slwt->headroom += desc->static_headroom;

	for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
		if (desc->attrs & (1 << i)) {
			if (!attrs[i])
				return -EINVAL;

			param = &seg6_action_params[i];

			err = param->parse(attrs, slwt);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

static int seg6_local_build_state(struct nlattr *nla, unsigned int family,
				  const void *cfg, struct lwtunnel_state **ts,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[SEG6_LOCAL_MAX + 1];
	struct lwtunnel_state *newts;
	struct seg6_local_lwt *slwt;
	int err;

	if (family != AF_INET6)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, SEG6_LOCAL_MAX, nla,
					  seg6_local_policy, extack);
	if (err < 0)
		return err;

	if (!tb[SEG6_LOCAL_ACTION])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*slwt));
	if (!newts)
		return -ENOMEM;

	slwt = seg6_local_lwtunnel(newts);
	slwt->action = nla_get_u32(tb[SEG6_LOCAL_ACTION]);

	err = parse_nla_action(tb, slwt);
	if (err < 0)
		goto out_free;

	newts->type = LWTUNNEL_ENCAP_SEG6_LOCAL;
	newts->flags = LWTUNNEL_STATE_INPUT_REDIRECT;
	newts->headroom = slwt->headroom;

	*ts = newts;

	return 0;

out_free:
	kfree(slwt->srh);
	kfree(newts);

	return err;
}

static void seg6_local_destroy_state(struct lwtunnel_state *lwt)
{
	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);

	kfree(slwt->srh);

	if (slwt->desc->attrs & (1 << SEG6_LOCAL_BPF)) {
		kfree(slwt->bpf.name);
		bpf_prog_put(slwt->bpf.prog);
	}
}

static int seg6_local_fill_encap(struct sk_buff *skb,
				 struct lwtunnel_state *lwt)
{
	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
	struct seg6_action_param *param;
	int i, err;

	if (nla_put_u32(skb, SEG6_LOCAL_ACTION, slwt->action))
		return -EMSGSIZE;

	for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
		if (slwt->desc->attrs & (1 << i)) {
			param = &seg6_action_params[i];

			err = param->put(skb, slwt);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

static int seg6_local_get_encap_size(struct lwtunnel_state *lwt)
{
	struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
	unsigned long attrs;
	int nlsize;

	nlsize = nla_total_size(4); /* action */

	attrs = slwt->desc->attrs;

	if (attrs & (1 << SEG6_LOCAL_SRH))
		nlsize += nla_total_size((slwt->srh->hdrlen + 1) << 3);

	if (attrs & (1 << SEG6_LOCAL_TABLE))
		nlsize += nla_total_size(4);

	if (attrs & (1 << SEG6_LOCAL_NH4))
		nlsize += nla_total_size(4);

	if (attrs & (1 << SEG6_LOCAL_NH6))
		nlsize += nla_total_size(16);

	if (attrs & (1 << SEG6_LOCAL_IIF))
		nlsize += nla_total_size(4);

	if (attrs & (1 << SEG6_LOCAL_OIF))
		nlsize += nla_total_size(4);

	if (attrs & (1 << SEG6_LOCAL_BPF))
		nlsize += nla_total_size(sizeof(struct nlattr)) +
			  nla_total_size(MAX_PROG_NAME) +
			  nla_total_size(4);

	return nlsize;
}

static int seg6_local_cmp_encap(struct lwtunnel_state *a,
				struct lwtunnel_state *b)
{
	struct seg6_local_lwt *slwt_a, *slwt_b;
	struct seg6_action_param *param;
	int i;

	slwt_a = seg6_local_lwtunnel(a);
	slwt_b = seg6_local_lwtunnel(b);

	if (slwt_a->action != slwt_b->action)
		return 1;

	if (slwt_a->desc->attrs != slwt_b->desc->attrs)
		return 1;

	for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
		if (slwt_a->desc->attrs & (1 << i)) {
			param = &seg6_action_params[i];
			if (param->cmp(slwt_a, slwt_b))
				return 1;
		}
	}

	return 0;
}

static const struct lwtunnel_encap_ops seg6_local_ops = {
	.build_state = seg6_local_build_state,
	.destroy_state = seg6_local_destroy_state,
	.input = seg6_local_input,
	.fill_encap = seg6_local_fill_encap,
	.get_encap_size = seg6_local_get_encap_size,
	.cmp_encap = seg6_local_cmp_encap,
	.owner = THIS_MODULE,
};
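
/* Register/unregister the seg6local lwtunnel encap ops; called from the
 * IPv6 segment routing init/exit path. Routes using this encap type carry
 * LWTUNNEL_ENCAP_SEG6_LOCAL state built by seg6_local_build_state().
 */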
int __init seg6_local_init(void)
{
	return lwtunnel_encap_add_ops(&seg6_local_ops,
				      LWTUNNEL_ENCAP_SEG6_LOCAL);
}

void seg6_local_exit(void)
{
	lwtunnel_encap_del_ops(&seg6_local_ops, LWTUNNEL_ENCAP_SEG6_LOCAL);
}