/* This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
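/* This file implements the LWTUNNEL_ENCAP_BPF lightweight-tunnel encap type:
 * BPF programs can be attached to a route and run at three points, input
 * (bpf_input), output (bpf_output) and transmit (bpf_xmit), through the
 * lwtunnel_encap_ops registered at the bottom of this file.
 */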
struct bpf_lwt_prog {
	struct bpf_prog *prog;
	char *name;
};

struct bpf_lwt {
	struct bpf_lwt_prog in;
	struct bpf_lwt_prog out;
	struct bpf_lwt_prog xmit;
	int family;
};

#define MAX_PROG_NAME 256
static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct bpf_lwt *)lwt->data;
}
#define NO_REDIRECT false
#define CAN_REDIRECT true
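/* Only the xmit hook passes CAN_REDIRECT into run_lwt_bpf(): BPF_REDIRECT is
 * honoured there alone. The input and output hooks pass NO_REDIRECT, and an
 * illegal BPF_REDIRECT verdict from them is downgraded to BPF_OK with a
 * one-time warning.
 */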
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
		       struct dst_entry *dst, bool can_redirect)
{
	int ret;

	/* Preempt disable is needed to protect per-cpu redirect_info between
	 * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
	 * access to maps strictly require a rcu_read_lock() for protection,
	 * mixing with BH RCU lock doesn't work.
	 */
	preempt_disable();
	bpf_compute_data_pointers(skb);
	ret = bpf_prog_run_save_cb(lwt->prog, skb);

	switch (ret) {
	case BPF_OK:
	case BPF_LWT_REROUTE:
		break;

	case BPF_REDIRECT:
		if (unlikely(!can_redirect)) {
			pr_warn_once("Illegal redirect return code in prog %s\n",
				     lwt->name ? : "<unknown>");
			ret = BPF_OK;
		} else {
			skb_reset_mac_header(skb);
			ret = skb_do_redirect(skb);
			if (ret == 0)
				ret = BPF_REDIRECT;
		}
		break;

	case BPF_DROP:
		kfree_skb(skb);
		ret = -EPERM;
		break;

	default:
		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
		kfree_skb(skb);
		ret = -EINVAL;
		break;
	}

	preempt_enable();

	return ret;
}
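/* After an input program returns BPF_LWT_REROUTE, the headers may have been
 * rewritten, so redo the routing decision for the packet before handing it
 * to dst_input().
 */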
static int bpf_lwt_input_reroute(struct sk_buff *skb)
{
	int err = -EINVAL;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, skb_dst(skb)->dev);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		err = ipv6_stub->ipv6_route_input(skb);
	} else {
		err = -EAFNOSUPPORT;
	}

	if (err)
		goto err;
	return dst_input(skb);

err:
	kfree_skb(skb);
	return err;
}
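/* bpf_input() and bpf_output() below run the attached program for their
 * hook, if one is set, then hand the skb back to the original dst path.
 * Redirects are not allowed at these hooks (NO_REDIRECT); an input program
 * may still return BPF_LWT_REROUTE to force a new routing decision.
 */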
static int bpf_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->in.prog) {
		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
		if (ret == BPF_LWT_REROUTE)
			return bpf_lwt_input_reroute(skb);
	}

	if (unlikely(!dst->lwtstate->orig_input)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_input(skb);
}
static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->out.prog) {
		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
	}

	if (unlikely(!dst->lwtstate->orig_output)) {
		pr_warn_once("orig_output not set on dst for prog %s\n",
			     bpf->out.name);
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_output(net, sk, skb);
}
static int xmit_check_hhlen(struct sk_buff *skb)
{
	int hh_len = skb_dst(skb)->dev->hard_header_len;

	if (skb_headroom(skb) < hh_len) {
		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	return 0;
}
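/* A transmit program that pushed a new IP header (and returned
 * BPF_LWT_REROUTE) has invalidated the current dst: look up a fresh route
 * based on the new outer header and send the packet through it.
 */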
static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
{
	struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
	int oif = l3mdev ? l3mdev->ifindex : 0;
	struct dst_entry *dst = NULL;
	int err = -EAFNOSUPPORT;
	struct sock *sk;
	struct net *net;
	bool ipv4;

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv4 = false;
	else
		goto err;

	sk = sk_to_full_sk(skb->sk);
	if (sk) {
		if (sk->sk_bound_dev_if)
			oif = sk->sk_bound_dev_if;
		net = sock_net(sk);
	} else {
		net = dev_net(skb_dst(skb)->dev);
	}

	if (ipv4) {
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {};
		struct rtable *rt;

		fl4.flowi4_oif = oif;
		fl4.flowi4_mark = skb->mark;
		fl4.flowi4_uid = sock_net_uid(net, sk);
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
		fl4.flowi4_proto = iph->protocol;
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;

		rt = ip_route_output_key(net, &fl4);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			goto err;
		}
		dst = &rt->dst;
	} else {
		struct ipv6hdr *iph6 = ipv6_hdr(skb);
		struct flowi6 fl6 = {};

		fl6.flowi6_oif = oif;
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_uid = sock_net_uid(net, sk);
		fl6.flowlabel = ip6_flowinfo(iph6);
		fl6.flowi6_proto = iph6->nexthdr;
		fl6.daddr = iph6->daddr;
		fl6.saddr = iph6->saddr;

		err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
		if (unlikely(err))
			goto err;
		if (unlikely(dst->error)) {
			err = dst->error;
			dst_release(dst);
			goto err;
		}
	}

	/* Although skb header was reserved in bpf_lwt_push_ip_encap(), it
	 * was done for the previous dst, so we are doing it here again, in
	 * case the new dst needs much more space. The call below is a noop
	 * if there is enough header space in skb.
	 */
	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
	if (unlikely(err))
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(err))
		return err;

	/* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */
	return LWTUNNEL_XMIT_DONE;

err:
	kfree_skb(skb);
	return err;
}
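/* Transmit hook: maps BPF verdicts onto lwtunnel return codes. BPF_OK
 * continues the regular xmit path, BPF_REDIRECT means skb_do_redirect()
 * already consumed the skb, and BPF_LWT_REROUTE triggers the fresh route
 * lookup above.
 */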
static int bpf_xmit(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->xmit.prog) {
		__be16 proto = skb->protocol;
		int ret;

		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
		switch (ret) {
		case BPF_OK:
			/* If the header changed, e.g. via bpf_lwt_push_encap,
			 * BPF_LWT_REROUTE below should have been used if the
			 * protocol was also changed.
			 */
			if (skb->protocol != proto) {
				kfree_skb(skb);
				return -EINVAL;
			}
			/* If the header was expanded, headroom might be too
			 * small for L2 header to come, expand as needed.
			 */
			ret = xmit_check_hhlen(skb);
			if (unlikely(ret))
				return ret;

			return LWTUNNEL_XMIT_CONTINUE;
		case BPF_REDIRECT:
			return LWTUNNEL_XMIT_DONE;
		case BPF_LWT_REROUTE:
			return bpf_lwt_xmit_reroute(skb);
		default:
			return ret;
		}
	}

	return LWTUNNEL_XMIT_CONTINUE;
}
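/* State teardown: release the program reference taken in bpf_parse_prog()
 * and free the duplicated program name.
 */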
static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
	if (prog->prog)
		bpf_prog_put(prog->prog);

	kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	bpf_lwt_prog_destroy(&bpf->in);
	bpf_lwt_prog_destroy(&bpf->out);
	bpf_lwt_prog_destroy(&bpf->xmit);
}
static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
	[LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
	[LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
				.len = MAX_PROG_NAME },
};
static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
			  enum bpf_prog_type type)
{
	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
	struct bpf_prog *p;
	int ret;
	u32 fd;

	ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
					  bpf_prog_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
		return -EINVAL;

	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
	if (!prog->name)
		return -ENOMEM;

	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
	p = bpf_prog_get_type(fd, type);
	if (IS_ERR(p))
		return PTR_ERR(p);

	prog->prog = p;

	return 0;
}
static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
	[LWT_BPF_IN]		= { .type = NLA_NESTED, },
	[LWT_BPF_OUT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT_HEADROOM]	= { .type = NLA_U32 },
};
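/* Netlink layout expected by bpf_build_state() (sketch):
 *
 *   LWT_BPF_IN / LWT_BPF_OUT / LWT_BPF_XMIT (nested, at least one required)
 *       LWT_BPF_PROG_FD    u32 fd of an already-loaded BPF program
 *       LWT_BPF_PROG_NAME  NUL-terminated string, echoed back in dumps
 *   LWT_BPF_XMIT_HEADROOM  u32, must not exceed LWT_BPF_MAX_HEADROOM
 */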
static int bpf_build_state(struct nlattr *nla,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWT_BPF_MAX + 1];
	struct lwtunnel_state *newts;
	struct bpf_lwt *bpf;
	int ret;

	if (family != AF_INET && family != AF_INET6)
		return -EAFNOSUPPORT;

	ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
					  extack);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*bpf));
	if (!newts)
		return -ENOMEM;

	newts->type = LWTUNNEL_ENCAP_BPF;
	bpf = bpf_lwt_lwtunnel(newts);

	if (tb[LWT_BPF_IN]) {
		newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
				     BPF_PROG_TYPE_LWT_IN);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_OUT]) {
		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
				     BPF_PROG_TYPE_LWT_OUT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT]) {
		newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
				     BPF_PROG_TYPE_LWT_XMIT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT_HEADROOM]) {
		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

		if (headroom > LWT_BPF_MAX_HEADROOM) {
			ret = -ERANGE;
			goto errout;
		}

		newts->headroom = headroom;
	}

	bpf->family = family;
	*ts = newts;

	return 0;

errout:
	bpf_destroy_state(newts);
	kfree(newts);
	return ret;
}
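/* Illustrative userspace configuration via iproute2 (object, section and
 * device names are hypothetical; exact syntax may vary by iproute2 version):
 *
 *   ip route add 192.168.1.0/24 encap bpf headroom 20 \
 *       xmit obj prog.o section lwt_xmit dev eth0
 */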
static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
			     struct bpf_lwt_prog *prog)
{
	struct nlattr *nest;

	if (!prog->prog)
		return 0;

	nest = nla_nest_start_noflag(skb, attr);
	if (!nest)
		return -EMSGSIZE;

	if (prog->name &&
	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
		return -EMSGSIZE;

	return nla_nest_end(skb, nest);
}
static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
		return -EMSGSIZE;

	return 0;
}
static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int nest_len = nla_total_size(sizeof(struct nlattr)) +
		       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
		       0;

	return nest_len + /* LWT_BPF_IN */
	       nest_len + /* LWT_BPF_OUT */
	       nest_len + /* LWT_BPF_XMIT */
	       0;
}
static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
	/* FIXME:
	 * The LWT state is currently rebuilt for delete requests which
	 * results in a new bpf_prog instance. Comparing names for now.
	 */
	if (!a->name && !b->name)
		return 0;

	if (!a->name || !b->name)
		return 1;

	return strcmp(a->name, b->name);
}
static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
	struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

	return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
	       bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
	       bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}
static const struct lwtunnel_encap_ops bpf_encap_ops = {
	.build_state	= bpf_build_state,
	.destroy_state	= bpf_destroy_state,
	.input		= bpf_input,
	.output		= bpf_output,
	.xmit		= bpf_xmit,
	.fill_encap	= bpf_fill_encap_info,
	.get_encap_size	= bpf_encap_nlsize,
	.cmp_encap	= bpf_encap_cmp,
	.owner		= THIS_MODULE,
};
static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
			   int encap_len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	gso_type |= SKB_GSO_DODGY;
	shinfo->gso_type |= gso_type;
	skb_decrease_gso_size(shinfo, encap_len);
	shinfo->gso_segs = 0;
	return 0;
}
static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
{
	int next_hdr_offset;
	void *next_hdr;
	__u8 protocol;

	/* SCTP and UDP_L4 gso need more nuanced handling than what
	 * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
	 * So at the moment only TCP GSO packets are let through.
	 */
	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		return -ENOTSUPP;

	if (ipv4) {
		protocol = ip_hdr(skb)->protocol;
		next_hdr_offset = sizeof(struct iphdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	} else {
		protocol = ipv6_hdr(skb)->nexthdr;
		next_hdr_offset = sizeof(struct ipv6hdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	}

	switch (protocol) {
	case IPPROTO_GRE:
		next_hdr_offset += sizeof(struct gre_base_hdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
			return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_GRE, encap_len);

	case IPPROTO_UDP:
		next_hdr_offset += sizeof(struct udphdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct udphdr *)next_hdr)->check)
			return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);

	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		if (ipv4)
			return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
		else
			return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);

	default:
		return -EPROTONOSUPPORT;
	}
}
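/* Backs the BPF_LWT_ENCAP_IP mode of the bpf_lwt_push_encap() helper:
 * validates and pushes a caller-supplied outer IPv4/IPv6 header in front of
 * the packet, then fixes up skb metadata and GSO state.
 */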
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
	struct iphdr *iph;
	bool ipv4;
	int err;

	if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
		return -EINVAL;

	/* validate protocol and length */
	iph = (struct iphdr *)hdr;
	if (iph->version == 4) {
		ipv4 = true;
		if (unlikely(len < iph->ihl * 4))
			return -EINVAL;
	} else if (iph->version == 6) {
		ipv4 = false;
		if (unlikely(len < sizeof(struct ipv6hdr)))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	if (ingress)
		err = skb_cow_head(skb, len + skb->mac_len);
	else
		err = skb_cow_head(skb,
				   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
	if (unlikely(err))
		return err;

	/* push the encap headers and fix pointers */
	skb_reset_inner_headers(skb);
	skb_reset_inner_mac_header(skb); /* mac header is not yet set */
	skb_set_inner_protocol(skb, skb->protocol);
	skb->encapsulation = 1;
	skb_push(skb, len);
	if (ingress)
		skb_postpush_rcsum(skb, iph, len);
	skb_reset_network_header(skb);
	memcpy(skb_network_header(skb), hdr, len);
	bpf_compute_data_pointers(skb);
	skb_clear_hash(skb);

	if (ipv4) {
		skb->protocol = htons(ETH_P_IP);
		iph = ip_hdr(skb);

		if (!iph->check)
			iph->check = ip_fast_csum((unsigned char *)iph,
						  iph->ihl);
	} else {
		skb->protocol = htons(ETH_P_IPV6);
	}

	if (skb_is_gso(skb))
		return handle_gso_encap(skb, ipv4, len);

	return 0;
}
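/* Minimal sketch of a transmit-hook program that pairs with the above
 * (hypothetical names; built and attached with a loader such as iproute2):
 *
 *   SEC("lwt_xmit")
 *   int xmit_prog(struct __sk_buff *skb)
 *   {
 *           // e.g. call bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &hdr,
 *           // sizeof(hdr)) and return BPF_LWT_REROUTE, or simply:
 *           return BPF_OK;
 *   }
 */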
static int __init bpf_lwt_init(void)
{
	return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)