// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Thomas Graf <[email protected]>
 */

#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>
#include <net/inet_dscp.h>

struct bpf_lwt_prog {
	struct bpf_prog *prog;
	char *name;
};

struct bpf_lwt {
	struct bpf_lwt_prog in;
	struct bpf_lwt_prog out;
	struct bpf_lwt_prog xmit;
	int family;
};

#define MAX_PROG_NAME 256

static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct bpf_lwt *)lwt->data;
}

#define NO_REDIRECT false
#define CAN_REDIRECT true

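/* Run an LWT BPF program on @skb and map its verdict onto the LWT
 * datapath: BPF_OK and BPF_LWT_REROUTE pass through unchanged,
 * BPF_REDIRECT is only honoured when @can_redirect is set, and
 * BPF_DROP or an unknown verdict frees the skb and returns an errno.
 */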
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
		       struct dst_entry *dst, bool can_redirect)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	int ret;

	/* Disabling BH is needed to protect per-CPU bpf_redirect_info between
	 * BPF prog and skb_do_redirect().
	 */
	local_bh_disable();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	bpf_compute_data_pointers(skb);
	ret = bpf_prog_run_save_cb(lwt->prog, skb);

	switch (ret) {
	case BPF_OK:
	case BPF_LWT_REROUTE:
		break;

	case BPF_REDIRECT:
		if (unlikely(!can_redirect)) {
			pr_warn_once("Illegal redirect return code in prog %s\n",
				     lwt->name ? : "<unknown>");
			ret = BPF_OK;
		} else {
			skb_reset_mac_header(skb);
			skb_do_redirect(skb);
			ret = BPF_REDIRECT;
		}
		break;

	case BPF_DROP:
		kfree_skb(skb);
		ret = -EPERM;
		break;

	default:
		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
		kfree_skb(skb);
		ret = -EINVAL;
		break;
	}

	bpf_net_ctx_clear(bpf_net_ctx);
	local_bh_enable();

	return ret;
}

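/* Redo the input route lookup after a BPF_LWT_REROUTE verdict: drop the
 * current dst, look the packet up again as IPv4 or IPv6, and feed it
 * back into dst_input(). The skb is freed on any error.
 */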
static int bpf_lwt_input_reroute(struct sk_buff *skb)
{
	enum skb_drop_reason reason;
	int err = -EINVAL;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct net_device *dev = skb_dst(skb)->dev;
		const struct iphdr *iph = ip_hdr(skb);

		dev_hold(dev);
		skb_dst_drop(skb);
		reason = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					      ip4h_dscp(iph), dev);
		err = reason ? -EINVAL : 0;
		dev_put(dev);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		skb_dst_drop(skb);
		err = ipv6_stub->ipv6_route_input(skb);
	} else {
		err = -EAFNOSUPPORT;
	}

	if (err)
		goto err;
	return dst_input(skb);

err:
	kfree_skb(skb);
	return err;
}

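/* lwtunnel input hook: run the LWT_BPF_IN program (redirects are not
 * permitted here), then either reroute the skb on BPF_LWT_REROUTE or
 * hand it to the saved orig_input.
 */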
static int bpf_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->in.prog) {
		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
		if (ret == BPF_LWT_REROUTE)
			return bpf_lwt_input_reroute(skb);
	}

	if (unlikely(!dst->lwtstate->orig_input)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_input(skb);
}

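/* lwtunnel output hook: run the LWT_BPF_OUT program (again without
 * redirect rights) and continue with the saved orig_output.
 */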
static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->out.prog) {
		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
	}

	if (unlikely(!dst->lwtstate->orig_output)) {
		pr_warn_once("orig_output not set on dst for prog %s\n",
			     bpf->out.name);
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_output(net, sk, skb);
}

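/* Make sure there is still enough headroom for the L2 header to be
 * pushed later; expand the skb head if the BPF program consumed it.
 */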
static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
{
	if (skb_headroom(skb) < hh_len) {
		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	return 0;
}

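/* Handle BPF_LWT_REROUTE from the xmit hook: redo the route lookup for
 * the (possibly newly encapsulated) IPv4/IPv6 packet, attach the new
 * dst and push the skb through dst_output(). Returns LWTUNNEL_XMIT_DONE
 * on success; the skb is freed on error.
 */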
static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
{
	struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
	int oif = l3mdev ? l3mdev->ifindex : 0;
	struct dst_entry *dst = NULL;
	int err = -EAFNOSUPPORT;
	struct sock *sk;
	struct net *net;
	bool ipv4;

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv4 = false;
	else
		goto err;

	sk = sk_to_full_sk(skb->sk);
	if (sk) {
		if (sk->sk_bound_dev_if)
			oif = sk->sk_bound_dev_if;
		net = sock_net(sk);
	} else {
		net = dev_net(skb_dst(skb)->dev);
	}

	if (ipv4) {
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {};
		struct rtable *rt;

		fl4.flowi4_oif = oif;
		fl4.flowi4_mark = skb->mark;
		fl4.flowi4_uid = sock_net_uid(net, sk);
		fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph));
		fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
		fl4.flowi4_proto = iph->protocol;
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;

		rt = ip_route_output_key(net, &fl4);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			goto err;
		}
		dst = &rt->dst;
	} else {
		struct ipv6hdr *iph6 = ipv6_hdr(skb);
		struct flowi6 fl6 = {};

		fl6.flowi6_oif = oif;
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_uid = sock_net_uid(net, sk);
		fl6.flowlabel = ip6_flowinfo(iph6);
		fl6.flowi6_proto = iph6->nexthdr;
		fl6.daddr = iph6->daddr;
		fl6.saddr = iph6->saddr;

		dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto err;
		}
	}
	if (unlikely(dst->error)) {
		err = dst->error;
		dst_release(dst);
		goto err;
	}

	/* Although skb header was reserved in bpf_lwt_push_ip_encap(), it
	 * was done for the previous dst, so we are doing it here again, in
	 * case the new dst needs much more space. The call below is a noop
	 * if there is enough header space in skb.
	 */
	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
	if (unlikely(err))
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(err))
		return net_xmit_errno(err);

	/* ip[6]_finish_output2 understands LWTUNNEL_XMIT_DONE */
	return LWTUNNEL_XMIT_DONE;

err:
	kfree_skb(skb);
	return err;
}

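/* lwtunnel xmit hook: the only hook where BPF_REDIRECT is allowed.
 * On BPF_OK the protocol must not have changed (that would require
 * BPF_LWT_REROUTE) and the headroom is re-checked for the L2 header.
 */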
static int bpf_xmit(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->xmit.prog) {
		int hh_len = dst->dev->hard_header_len;
		__be16 proto = skb->protocol;
		int ret;

		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
		switch (ret) {
		case BPF_OK:
			/* If the header changed, e.g. via bpf_lwt_push_encap,
			 * BPF_LWT_REROUTE below should have been used if the
			 * protocol was also changed.
			 */
			if (skb->protocol != proto) {
				kfree_skb(skb);
				return -EINVAL;
			}
			/* If the header was expanded, headroom might be too
			 * small for L2 header to come, expand as needed.
			 */
			ret = xmit_check_hhlen(skb, hh_len);
			if (unlikely(ret))
				return ret;

			return LWTUNNEL_XMIT_CONTINUE;
		case BPF_REDIRECT:
			return LWTUNNEL_XMIT_DONE;
		case BPF_LWT_REROUTE:
			return bpf_lwt_xmit_reroute(skb);
		default:
			return ret;
		}
	}

	return LWTUNNEL_XMIT_CONTINUE;
}

static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
	if (prog->prog)
		bpf_prog_put(prog->prog);

	kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	bpf_lwt_prog_destroy(&bpf->in);
	bpf_lwt_prog_destroy(&bpf->out);
	bpf_lwt_prog_destroy(&bpf->xmit);
}

static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
	[LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
	[LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
				.len = MAX_PROG_NAME },
};

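/* Parse one nested LWT_BPF_{IN,OUT,XMIT} attribute: both the program
 * file descriptor and a name are required; the fd is resolved to a
 * bpf_prog reference of the expected program type.
 */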
static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
			  enum bpf_prog_type type)
{
	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
	struct bpf_prog *p;
	int ret;
	u32 fd;

	ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
					  bpf_prog_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
		return -EINVAL;

	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
	if (!prog->name)
		return -ENOMEM;

	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
	p = bpf_prog_get_type(fd, type);
	if (IS_ERR(p))
		return PTR_ERR(p);

	prog->prog = p;

	return 0;
}

static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
	[LWT_BPF_IN]		= { .type = NLA_NESTED, },
	[LWT_BPF_OUT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT_HEADROOM]	= { .type = NLA_U32 },
};

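/* Netlink build_state callback: validate the LWT_BPF attributes,
 * allocate the lwtunnel state and attach the in/out/xmit programs.
 * On failure, any partially attached programs are released again.
 */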
static int bpf_build_state(struct net *net, struct nlattr *nla,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWT_BPF_MAX + 1];
	struct lwtunnel_state *newts;
	struct bpf_lwt *bpf;
	int ret;

	if (family != AF_INET && family != AF_INET6)
		return -EAFNOSUPPORT;

	ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
					  extack);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*bpf));
	if (!newts)
		return -ENOMEM;

	newts->type = LWTUNNEL_ENCAP_BPF;
	bpf = bpf_lwt_lwtunnel(newts);

	if (tb[LWT_BPF_IN]) {
		newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
				     BPF_PROG_TYPE_LWT_IN);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_OUT]) {
		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
				     BPF_PROG_TYPE_LWT_OUT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT]) {
		newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
				     BPF_PROG_TYPE_LWT_XMIT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT_HEADROOM]) {
		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

		if (headroom > LWT_BPF_MAX_HEADROOM) {
			ret = -ERANGE;
			goto errout;
		}

		newts->headroom = headroom;
	}

	bpf->family = family;
	*ts = newts;

	return 0;

errout:
	bpf_destroy_state(newts);
	kfree(newts);
	return ret;
}

static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
			     struct bpf_lwt_prog *prog)
{
	struct nlattr *nest;

	if (!prog->prog)
		return 0;

	nest = nla_nest_start_noflag(skb, attr);
	if (!nest)
		return -EMSGSIZE;

	if (prog->name &&
	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
		return -EMSGSIZE;

	return nla_nest_end(skb, nest);
}

static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
		return -EMSGSIZE;

	return 0;
}

static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int nest_len = nla_total_size(sizeof(struct nlattr)) +
		       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
		       0;

	return nest_len + /* LWT_BPF_IN */
	       nest_len + /* LWT_BPF_OUT */
	       nest_len + /* LWT_BPF_XMIT */
	       0;
}

static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
	/* FIXME:
	 * The LWT state is currently rebuilt for delete requests which
	 * results in a new bpf_prog instance. Comparing names for now.
	 */
	if (!a->name && !b->name)
		return 0;

	if (!a->name || !b->name)
		return 1;

	return strcmp(a->name, b->name);
}

static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
	struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

	return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
	       bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
	       bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}

static const struct lwtunnel_encap_ops bpf_encap_ops = {
	.build_state	= bpf_build_state,
	.destroy_state	= bpf_destroy_state,
	.input		= bpf_input,
	.output		= bpf_output,
	.xmit		= bpf_xmit,
	.fill_encap	= bpf_fill_encap_info,
	.get_encap_size	= bpf_encap_nlsize,
	.cmp_encap	= bpf_encap_cmp,
	.owner		= THIS_MODULE,
};

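/* Mark a GSO skb as tunneled: add @gso_type (plus SKB_GSO_DODGY so the
 * stack revalidates it), shrink gso_size by the pushed @encap_len and
 * clear gso_segs so it gets recomputed.
 */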
static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
			   int encap_len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	gso_type |= SKB_GSO_DODGY;
	shinfo->gso_type |= gso_type;
	skb_decrease_gso_size(shinfo, encap_len);
	shinfo->gso_segs = 0;
	return 0;
}

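/* Fix up GSO metadata after an IP header was pushed in front of a GSO
 * packet: only TCP GSO is supported, and the new outer protocol must be
 * GRE, UDP or plain IP-in-IP so the matching tunnel gso_type can be
 * selected.
 */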
static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
{
	int next_hdr_offset;
	void *next_hdr;
	__u8 protocol;

	/* SCTP and UDP_L4 gso need more nuanced handling than what
	 * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
	 * So at the moment only TCP GSO packets are let through.
	 */
	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		return -ENOTSUPP;

	if (ipv4) {
		protocol = ip_hdr(skb)->protocol;
		next_hdr_offset = sizeof(struct iphdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	} else {
		protocol = ipv6_hdr(skb)->nexthdr;
		next_hdr_offset = sizeof(struct ipv6hdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	}

	switch (protocol) {
	case IPPROTO_GRE:
		next_hdr_offset += sizeof(struct gre_base_hdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
			return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_GRE, encap_len);

	case IPPROTO_UDP:
		next_hdr_offset += sizeof(struct udphdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct udphdr *)next_hdr)->check)
			return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);

	case IPPROTO_IP:
	case IPPROTO_IPV6:
		if (ipv4)
			return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
		else
			return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);

	default:
		return -EPROTONOSUPPORT;
	}
}

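/* Push a pre-built IPv4/IPv6 header of @len bytes from @hdr in front of
 * the skb, fix the skb metadata and checksums, and update GSO state if
 * needed; @ingress decides how much headroom must be guaranteed. Used by
 * the bpf_lwt_push_encap() helper in IP encap mode.
 */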
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
	struct iphdr *iph;
	bool ipv4;
	int err;

	if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
		return -EINVAL;

	/* validate protocol and length */
	iph = (struct iphdr *)hdr;
	if (iph->version == 4) {
		ipv4 = true;
		if (unlikely(len < iph->ihl * 4))
			return -EINVAL;
	} else if (iph->version == 6) {
		ipv4 = false;
		if (unlikely(len < sizeof(struct ipv6hdr)))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	if (ingress)
		err = skb_cow_head(skb, len + skb->mac_len);
	else
		err = skb_cow_head(skb,
				   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
	if (unlikely(err))
		return err;

	/* push the encap headers and fix pointers */
	skb_reset_inner_headers(skb);
	skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
	skb_set_inner_protocol(skb, skb->protocol);
	skb->encapsulation = 1;
	skb_push(skb, len);
	if (ingress)
		skb_postpush_rcsum(skb, iph, len);
	skb_reset_network_header(skb);
	memcpy(skb_network_header(skb), hdr, len);
	bpf_compute_data_pointers(skb);
	skb_clear_hash(skb);

	if (ipv4) {
		skb->protocol = htons(ETH_P_IP);
		iph = ip_hdr(skb);

		if (!iph->check)
			iph->check = ip_fast_csum((unsigned char *)iph,
						  iph->ihl);
	} else {
		skb->protocol = htons(ETH_P_IPV6);
	}

	if (skb_is_gso(skb))
		return handle_gso_encap(skb, ipv4, len);

	return 0;
}

static int __init bpf_lwt_init(void)
{
	return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)