// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>

#include "bpf_flow.skel.h"

#define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */

#ifndef IP_MF
#define IP_MF 0x2000
#endif

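/* Compare the dissected flow keys against the expected template and, on
 * mismatch, report each got/expected field pair.
 */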
#define CHECK_FLOW_KEYS(desc, got, expected)                            \
        _CHECK(memcmp(&got, &expected, sizeof(got)) != 0,               \
              desc,                                                     \
              topts.duration,                                           \
              "nhoff=%u/%u "                                            \
              "thoff=%u/%u "                                            \
              "addr_proto=0x%x/0x%x "                                   \
              "is_frag=%u/%u "                                          \
              "is_first_frag=%u/%u "                                    \
              "is_encap=%u/%u "                                         \
              "ip_proto=0x%x/0x%x "                                     \
              "n_proto=0x%x/0x%x "                                      \
              "flow_label=0x%x/0x%x "                                   \
              "sport=%u/%u "                                            \
              "dport=%u/%u\n",                                          \
              got.nhoff, expected.nhoff,                                \
              got.thoff, expected.thoff,                                \
              got.addr_proto, expected.addr_proto,                      \
              got.is_frag, expected.is_frag,                            \
              got.is_first_frag, expected.is_first_frag,                \
              got.is_encap, expected.is_encap,                          \
              got.ip_proto, expected.ip_proto,                          \
              got.n_proto, expected.n_proto,                            \
              got.flow_label, expected.flow_label,                      \
              got.sport, expected.sport,                                \
              got.dport, expected.dport)

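/* Wire-format packet templates used as inputs to the dissector tests. */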
struct ipv4_pkt {
        struct ethhdr eth;
        struct iphdr iph;
        struct tcphdr tcp;
} __packed;

struct ipip_pkt {
        struct ethhdr eth;
        struct iphdr iph;
        struct iphdr iph_inner;
        struct tcphdr tcp;
} __packed;

struct svlan_ipv4_pkt {
        struct ethhdr eth;
        __u16 vlan_tci;
        __u16 vlan_proto;
        struct iphdr iph;
        struct tcphdr tcp;
} __packed;

struct ipv6_pkt {
        struct ethhdr eth;
        struct ipv6hdr iph;
        struct tcphdr tcp;
} __packed;

struct ipv6_frag_pkt {
        struct ethhdr eth;
        struct ipv6hdr iph;
        struct frag_hdr {
                __u8 nexthdr;
                __u8 reserved;
                __be16 frag_off;
                __be32 identification;
        } ipf;
        struct tcphdr tcp;
} __packed;

struct dvlan_ipv6_pkt {
        struct ethhdr eth;
        __u16 vlan_tci;
        __u16 vlan_proto;
        __u16 vlan_tci2;
        __u16 vlan_proto2;
        struct ipv6hdr iph;
        struct tcphdr tcp;
} __packed;

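/* One test case: input packet, flags passed to the dissector, the expected
 * flow keys and the expected program return value.
 */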
struct test {
        const char *name;
        union {
                struct ipv4_pkt ipv4;
                struct svlan_ipv4_pkt svlan_ipv4;
                struct ipip_pkt ipip;
                struct ipv6_pkt ipv6;
                struct ipv6_frag_pkt ipv6_frag;
                struct dvlan_ipv6_pkt dvlan_ipv6;
        } pkt;
        struct bpf_flow_keys keys;
        __u32 flags;
        __u32 retval;
};

#define VLAN_HLEN       4

static __u32 duration;
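/* Test vectors: each entry pairs a crafted packet with the flow keys the
 * dissector is expected to produce and its expected return code.
 */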
struct test tests[] = {
        {
                .name = "ipv4",
                .pkt.ipv4 = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
                        .iph.ihl = 5,
                        .iph.protocol = IPPROTO_TCP,
                        .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct iphdr),
                        .addr_proto = ETH_P_IP,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IP),
                        .sport = 80,
                        .dport = 8080,
                },
                .retval = BPF_OK,
        },
        {
                .name = "ipv6",
                .pkt.ipv6 = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .iph.nexthdr = IPPROTO_TCP,
                        .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
                        .addr_proto = ETH_P_IPV6,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .sport = 80,
                        .dport = 8080,
                },
                .retval = BPF_OK,
        },
        {
                .name = "802.1q-ipv4",
                .pkt.svlan_ipv4 = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
                        .vlan_proto = __bpf_constant_htons(ETH_P_IP),
                        .iph.ihl = 5,
                        .iph.protocol = IPPROTO_TCP,
                        .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .nhoff = ETH_HLEN + VLAN_HLEN,
                        .thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
                        .addr_proto = ETH_P_IP,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IP),
                        .sport = 80,
                        .dport = 8080,
                },
                .retval = BPF_OK,
        },
        {
                .name = "802.1ad-ipv6",
                .pkt.dvlan_ipv6 = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
                        .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
                        .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
                        .iph.nexthdr = IPPROTO_TCP,
                        .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .nhoff = ETH_HLEN + VLAN_HLEN * 2,
                        .thoff = ETH_HLEN + VLAN_HLEN * 2 +
                                sizeof(struct ipv6hdr),
                        .addr_proto = ETH_P_IPV6,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .sport = 80,
                        .dport = 8080,
                },
                .retval = BPF_OK,
        },
        {
                .name = "ipv4-frag",
                .pkt.ipv4 = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
                        .iph.ihl = 5,
                        .iph.protocol = IPPROTO_TCP,
                        .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
                        .iph.frag_off = __bpf_constant_htons(IP_MF),
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct iphdr),
                        .addr_proto = ETH_P_IP,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IP),
                        .is_frag = true,
                        .is_first_frag = true,
                        .sport = 80,
                        .dport = 8080,
                },
                .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
                .retval = BPF_OK,
        },
        {
                .name = "ipv4-no-frag",
                .pkt.ipv4 = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
                        .iph.ihl = 5,
                        .iph.protocol = IPPROTO_TCP,
                        .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
                        .iph.frag_off = __bpf_constant_htons(IP_MF),
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct iphdr),
                        .addr_proto = ETH_P_IP,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IP),
                        .is_frag = true,
                        .is_first_frag = true,
                },
                .retval = BPF_OK,
        },
        {
                .name = "ipv6-frag",
                .pkt.ipv6_frag = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .iph.nexthdr = IPPROTO_FRAGMENT,
                        .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
                        .ipf.nexthdr = IPPROTO_TCP,
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
                                sizeof(struct frag_hdr),
                        .addr_proto = ETH_P_IPV6,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .is_frag = true,
                        .is_first_frag = true,
                        .sport = 80,
                        .dport = 8080,
                },
                .flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
                .retval = BPF_OK,
        },
        {
                .name = "ipv6-no-frag",
                .pkt.ipv6_frag = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .iph.nexthdr = IPPROTO_FRAGMENT,
                        .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
                        .ipf.nexthdr = IPPROTO_TCP,
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
                                sizeof(struct frag_hdr),
                        .addr_proto = ETH_P_IPV6,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .is_frag = true,
                        .is_first_frag = true,
                },
                .retval = BPF_OK,
        },
        {
                .name = "ipv6-flow-label",
                .pkt.ipv6 = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .iph.nexthdr = IPPROTO_TCP,
                        .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
                        .iph.flow_lbl = { 0xb, 0xee, 0xef },
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
                        .addr_proto = ETH_P_IPV6,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .sport = 80,
                        .dport = 8080,
                        .flow_label = __bpf_constant_htonl(0xbeeef),
                },
                .retval = BPF_OK,
        },
        {
                .name = "ipv6-no-flow-label",
                .pkt.ipv6 = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .iph.nexthdr = IPPROTO_TCP,
                        .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
                        .iph.flow_lbl = { 0xb, 0xee, 0xef },
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct ipv6hdr),
                        .addr_proto = ETH_P_IPV6,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IPV6),
                        .flow_label = __bpf_constant_htonl(0xbeeef),
                },
                .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
                .retval = BPF_OK,
        },
        {
                .name = "ipip-encap",
                .pkt.ipip = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
                        .iph.ihl = 5,
                        .iph.protocol = IPPROTO_IPIP,
                        .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
                        .iph_inner.ihl = 5,
                        .iph_inner.protocol = IPPROTO_TCP,
                        .iph_inner.tot_len =
                                __bpf_constant_htons(MAGIC_BYTES) -
                                sizeof(struct iphdr),
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct iphdr) +
                                sizeof(struct iphdr),
                        .addr_proto = ETH_P_IP,
                        .ip_proto = IPPROTO_TCP,
                        .n_proto = __bpf_constant_htons(ETH_P_IP),
                        .is_encap = true,
                        .sport = 80,
                        .dport = 8080,
                },
                .retval = BPF_OK,
        },
        {
                .name = "ipip-no-encap",
                .pkt.ipip = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
                        .iph.ihl = 5,
                        .iph.protocol = IPPROTO_IPIP,
                        .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
                        .iph_inner.ihl = 5,
                        .iph_inner.protocol = IPPROTO_TCP,
                        .iph_inner.tot_len =
                                __bpf_constant_htons(MAGIC_BYTES) -
                                sizeof(struct iphdr),
                        .tcp.doff = 5,
                        .tcp.source = 80,
                        .tcp.dest = 8080,
                },
                .keys = {
                        .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
                        .nhoff = ETH_HLEN,
                        .thoff = ETH_HLEN + sizeof(struct iphdr),
                        .addr_proto = ETH_P_IP,
                        .ip_proto = IPPROTO_IPIP,
                        .n_proto = __bpf_constant_htons(ETH_P_IP),
                        .is_encap = true,
                },
                .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
                .retval = BPF_OK,
        },
        {
                .name = "ipip-encap-dissector-continue",
                .pkt.ipip = {
                        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
                        .iph.ihl = 5,
                        .iph.protocol = IPPROTO_IPIP,
                        .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
                        .iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR),
                        .iph_inner.ihl = 5,
                        .iph_inner.protocol = IPPROTO_TCP,
                        .iph_inner.tot_len =
                                __bpf_constant_htons(MAGIC_BYTES) -
                                sizeof(struct iphdr),
                        .tcp.doff = 5,
                        .tcp.source = 99,
                        .tcp.dest = 9090,
                },
                .retval = BPF_FLOW_DISSECTOR_CONTINUE,
        },
};

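/* Create a TAP device; IFF_NAPI_FRAGS makes the tun driver take the receive
 * path that calls eth_get_headlen (the skb-less dissector) for packets
 * written to the returned fd.
 */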
static int create_tap(const char *ifname)
{
        struct ifreq ifr = {
                .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
        };
        int fd, ret;

        strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

        fd = open("/dev/net/tun", O_RDWR);
        if (fd < 0)
                return -1;

        ret = ioctl(fd, TUNSETIFF, &ifr);
        if (ret)
                return -1;

        return fd;
}

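/* Inject a single packet into the TAP device. */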
static int tx_tap(int fd, void *pkt, size_t len)
{
        struct iovec iov[] = {
                {
                        .iov_len = len,
                        .iov_base = pkt,
                },
        };
        return writev(fd, iov, ARRAY_SIZE(iov));
}

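/* Bring the interface up (set IFF_UP) via SIOCGIFFLAGS/SIOCSIFFLAGS. */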
static int ifup(const char *ifname)
{
        struct ifreq ifr = {};
        int sk, ret;

        strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

        sk = socket(PF_INET, SOCK_DGRAM, 0);
        if (sk < 0)
                return -1;

        ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
        if (ret) {
                close(sk);
                return -1;
        }

        ifr.ifr_flags |= IFF_UP;
        ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
        if (ret) {
                close(sk);
                return -1;
        }

        close(sk);
        return 0;
}

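/* Populate the program array with the flow_dissector_<i> programs from the
 * loaded object, one entry per map slot.
 */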
static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
{
        int i, err, map_fd, prog_fd;
        struct bpf_program *prog;
        char prog_name[32];

        map_fd = bpf_map__fd(prog_array);
        if (map_fd < 0)
                return -1;

        for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
                snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);

                prog = bpf_object__find_program_by_name(obj, prog_name);
                if (!prog)
                        return -1;

                prog_fd = bpf_program__fd(prog);
                if (prog_fd < 0)
                        return -1;

                err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
                if (err)
                        return -1;
        }
        return 0;
}

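/* Send each eligible test packet through the TAP device and compare the
 * flow keys stored by the attached dissector in the 'keys' map (indexed by
 * sport/dport) against the expected values.
 */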
static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
{
        int i, err, keys_fd;

        keys_fd = bpf_map__fd(keys);
        if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
                return;

        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                /* Keep in sync with 'flags' from eth_get_headlen. */
                __u32 eth_get_headlen_flags =
                        BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
                LIBBPF_OPTS(bpf_test_run_opts, topts);
                struct bpf_flow_keys flow_keys = {};
                __u32 key = (__u32)(tests[i].keys.sport) << 16 |
                            tests[i].keys.dport;

                /* For skb-less case we can't pass input flags; run
                 * only the tests that have a matching set of flags.
                 */

                if (tests[i].flags != eth_get_headlen_flags)
                        continue;

                err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
                CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

                /* check the stored flow_keys only if BPF_OK expected */
                if (tests[i].retval != BPF_OK)
                        continue;

                err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
                ASSERT_OK(err, "bpf_map_lookup_elem");

                CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);

                err = bpf_map_delete_elem(keys_fd, &key);
                ASSERT_OK(err, "bpf_map_delete_elem");
        }
}

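/* Attach the dissector with bpf_prog_attach(), run the skb-less tests,
 * then detach it.
 */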
static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
{
        int err, prog_fd;

        prog_fd = bpf_program__fd(skel->progs._dissect);
        if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
                return;

        err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
        if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
                return;

        run_tests_skb_less(tap_fd, skel->maps.last_dissection);

        err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
        CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}

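/* Attach the dissector to the current network namespace via a BPF link,
 * run the skb-less tests, then destroy the link.
 */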
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
{
        struct bpf_link *link;
        int err, net_fd;

        net_fd = open("/proc/self/ns/net", O_RDONLY);
        if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
                return;

        link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
        if (!ASSERT_OK_PTR(link, "attach_netns"))
                goto out_close;

        run_tests_skb_less(tap_fd, skel->maps.last_dissection);

        err = bpf_link__destroy(link);
        CHECK(err, "bpf_link__destroy", "err %d\n", err);
out_close:
        close(net_fd);
}

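/* Entry point: run every test case through BPF_PROG_TEST_RUN, then repeat
 * the applicable cases over a TAP device to cover the skb-less path.
 */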
void test_flow_dissector(void)
{
        int i, err, prog_fd, keys_fd = -1, tap_fd;
        struct bpf_flow *skel;

        skel = bpf_flow__open_and_load();
        if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
                return;

        prog_fd = bpf_program__fd(skel->progs._dissect);
        if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
                goto out_destroy_skel;
        keys_fd = bpf_map__fd(skel->maps.last_dissection);
        if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
                goto out_destroy_skel;
        err = init_prog_array(skel->obj, skel->maps.jmp_table);
        if (CHECK(err, "init_prog_array", "err %d\n", err))
                goto out_destroy_skel;

        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                struct bpf_flow_keys flow_keys;
                LIBBPF_OPTS(bpf_test_run_opts, topts,
                        .data_in = &tests[i].pkt,
                        .data_size_in = sizeof(tests[i].pkt),
                        .data_out = &flow_keys,
                );
                static struct bpf_flow_keys ctx = {};

                if (tests[i].flags) {
                        topts.ctx_in = &ctx;
                        topts.ctx_size_in = sizeof(ctx);
                        ctx.flags = tests[i].flags;
                }

                err = bpf_prog_test_run_opts(prog_fd, &topts);
                ASSERT_OK(err, "test_run");
                ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval");

                /* check the resulting flow_keys only if BPF_OK returned */
                if (topts.retval != BPF_OK)
                        continue;
                ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
                          "test_run data_size_out");
                CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
        }

        /* Do the same tests but for skb-less flow dissector.
         * We use a known path in the net/tun driver that calls
         * eth_get_headlen and we manually export bpf_flow_keys
         * via BPF map in this case.
         */

        tap_fd = create_tap("tap0");
        CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
        err = ifup("tap0");
        CHECK(err, "ifup", "err %d errno %d\n", err, errno);

        /* Test direct prog attachment */
        test_skb_less_prog_attach(skel, tap_fd);
        /* Test indirect prog attachment via link */
        test_skb_less_link_create(skel, tap_fd);

        close(tap_fd);
out_destroy_skel:
        bpf_flow__destroy(skel);
}