// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

/* flow director ethtool support for iavf */

#include <linux/bitfield.h>
#include "iavf.h"

#define GTPU_PORT	2152
#define NAT_T_ESP_PORT	4500
#define PFCP_PORT	8805
static const struct in6_addr ipv6_addr_full_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};
static const struct in6_addr ipv6_addr_zero_mask = {
	.in6_u = {
		.u6_addr8 = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		}
	}
};
/**
 * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns 0 if all masks of packet fields are either full or empty. Returns
 * error on at least one partial mask.
 */
int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
				  struct iavf_fdir_fltr *fltr)
{
	if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
		goto partial_mask;

	if (fltr->ip_ver == 4) {
		if (fltr->ip_mask.v4_addrs.src_ip &&
		    fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
			goto partial_mask;

		if (fltr->ip_mask.v4_addrs.dst_ip &&
		    fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
			goto partial_mask;

		if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
			goto partial_mask;
	} else if (fltr->ip_ver == 6) {
		if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
			   sizeof(struct in6_addr)) &&
		    memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
			   sizeof(struct in6_addr)))
			goto partial_mask;

		if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
			   sizeof(struct in6_addr)) &&
		    memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
			   sizeof(struct in6_addr)))
			goto partial_mask;

		if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
			goto partial_mask;
	}

	if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
		goto partial_mask;
	if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
		goto partial_mask;
	if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
		goto partial_mask;
	if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
		goto partial_mask;
	if (fltr->ip_mask.l4_header &&
	    fltr->ip_mask.l4_header != htonl(U32_MAX))
		goto partial_mask;

	return 0;

partial_mask:
	dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
	return -EOPNOTSUPP;
}
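/*
 * Illustrative only: a fully specified ethtool rule such as
 *   ethtool -N <iface> flow-type tcp4 src-ip 192.168.1.10 dst-port 80 action 4
 * passes the check above because every supplied field carries a full mask,
 * while a rule that masks out only part of a field is rejected as a partial
 * mask.
 */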
/**
 * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
 * @fltr: Flow Director filter data structure
 */
static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr)
{
	return sizeof(struct ethhdr) +
		(fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
		sizeof(struct udphdr);
}
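/*
 * For reference, this works out to 14 + 20 + 8 = 42 bytes for IPv4 and
 * 14 + 40 + 8 = 62 bytes for IPv6, i.e. the offset of the first UDP payload
 * byte that the flex-word parsers below key off.
 */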
/**
 * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the GTP-U protocol header is set successfully
 */
static int
iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */
	u16 adj_offs, hdr_offs;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_GTPU_HDR_TEID_OFFS0	4
#define IAVF_GTPU_HDR_TEID_OFFS1	6
#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS	10
#define IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK	0x00FF /* skip N_PDU */
/* PDU Session Container Extension Header (PSC) */
#define IAVF_GTPU_PSC_EXTHDR_TYPE	0x85
#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS	13
#define IAVF_GTPU_HDR_PSC_PDU_QFI_MASK	0x3F /* skip Type */
#define IAVF_GTPU_EH_QFI_IDX	1

		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_GTPU_HDR_TEID_OFFS0:
		case IAVF_GTPU_HDR_TEID_OFFS1: {
			__be16 *pay_word = (__be16 *)ghdr->buffer;

			pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word);
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID);
			}
			break;
		case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS:
			if ((fltr->flex_words[i].word &
			     IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK) !=
					IAVF_GTPU_PSC_EXTHDR_TYPE)
				return -EOPNOTSUPP;

			ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
			VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH);
			break;
		case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS:
			if (!ehdr) /* no PSC extension header was announced */
				return -EINVAL;

			ehdr->buffer[IAVF_GTPU_EH_QFI_IDX] =
					fltr->flex_words[i].word &
						IAVF_GTPU_HDR_PSC_PDU_QFI_MASK;
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI);
			break;
		default:
			return -EINVAL;
		}
	}

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}
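/*
 * Reference for the flex-word offsets above (relative to the start of the
 * UDP payload): the GTP-U TEID occupies bytes 4-7 (two 16-bit words at
 * offsets 4 and 6), the next-extension-header type is the low byte of the
 * word at offset 10, and for a PDU Session Container extension header
 * (type 0x85) the QFI sits in the low six bits of the word at offset 13.
 */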
/**
 * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the PFCP protocol header is set successfully
 */
static int
iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	u16 adj_offs, hdr_offs;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS	0
		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS:
			hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff;
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
			break;
		default:
			return -EINVAL;
		}
	}

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}
/**
 * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the NAT-T-ESP protocol header is set successfully
 */
static int
iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr,
			     struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	u16 adj_offs, hdr_offs;
	u32 spi = 0;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_NAT_T_ESP_SPI_OFFS0	0
#define IAVF_NAT_T_ESP_SPI_OFFS1	2
		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_NAT_T_ESP_SPI_OFFS0:
			spi |= fltr->flex_words[i].word << 16;
			break;
		case IAVF_NAT_T_ESP_SPI_OFFS1:
			spi |= fltr->flex_words[i].word;
			break;
		default:
			return -EINVAL;
		}
	}

	if (!spi)
		return -EOPNOTSUPP; /* IKE Header Format with SPI 0 is not supported */

	*(__be32 *)hdr->buffer = htonl(spi);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}
/**
 * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the UDP payload defined protocol header is set successfully
 */
static int
iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr,
				struct virtchnl_proto_hdrs *proto_hdrs)
{
	int err;

	switch (ntohs(fltr->ip_data.dst_port)) {
	case GTPU_PORT:
		err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs);
		break;
	case NAT_T_ESP_PORT:
		err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs);
		break;
	case PFCP_PORT:
		err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
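/*
 * The dispatch above keys off well-known UDP destination ports: 2152 (GTP-U),
 * 4500 (NAT-T ESP) and 8805 (PFCP). Flex words on any other UDP destination
 * port are rejected with -EOPNOTSUPP.
 */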
/**
 * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the IPv4 protocol header is set successfully
 */
static int
iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct iphdr *iph = (struct iphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

	if (fltr->ip_mask.tos == U8_MAX) {
		iph->tos = fltr->ip_data.tos;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
	}

	if (fltr->ip_mask.proto == U8_MAX) {
		iph->protocol = fltr->ip_data.proto;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
	}

	if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) {
		iph->saddr = fltr->ip_data.v4_addrs.src_ip;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	}

	if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) {
		iph->daddr = fltr->ip_data.v4_addrs.dst_ip;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
	}

	return 0;
}
/**
 * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the IPv6 protocol header is set successfully
 */
static int
iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

	if (fltr->ip_mask.tclass == U8_MAX) {
		iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
		iph->flow_lbl[0] = FIELD_PREP(0xF0, fltr->ip_data.tclass);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
	}

	if (fltr->ip_mask.proto == U8_MAX) {
		iph->nexthdr = fltr->ip_data.proto;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
	}

	if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
		    sizeof(struct in6_addr))) {
		memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
	}

	if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
		    sizeof(struct in6_addr))) {
		memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
	}

	return 0;
}
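/*
 * Worked example for the traffic-class split above: tclass 0xA3 programs
 * iph->priority = 0xA and iph->flow_lbl[0] = 0x30, i.e. the low nibble of
 * the traffic class lands in the top four bits of the flow-label field.
 */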
/**
 * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the TCP protocol header is set successfully
 */
static int
iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct tcphdr *tcph = (struct tcphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		tcph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		tcph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
	}

	return 0;
}
/**
 * iavf_fill_fdir_udp_hdr - fill the UDP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the UDP protocol header is set successfully
 */
static int
iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct udphdr *udph = (struct udphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		udph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		udph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
	}

	if (!fltr->flex_cnt)
		return 0;

	return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs);
}
/**
 * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the SCTP protocol header is set successfully
 */
static int
iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct sctphdr *sctph = (struct sctphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		sctph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		sctph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
	}

	return 0;
}
/**
 * iavf_fill_fdir_ah_hdr - fill the AH protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the AH protocol header is set successfully
 */
static int
iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr,
		      struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

	if (fltr->ip_mask.spi == htonl(U32_MAX)) {
		ah->spi = fltr->ip_data.spi;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
	}

	return 0;
}
/**
 * iavf_fill_fdir_esp_hdr - fill the ESP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the ESP protocol header is set successfully
 */
static int
iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

	if (fltr->ip_mask.spi == htonl(U32_MAX)) {
		esph->spi = fltr->ip_data.spi;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
	}

	return 0;
}
/**
 * iavf_fill_fdir_l4_hdr - fill the L4 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the L4 protocol header is set successfully
 */
static int
iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr,
		      struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr;
	__be32 *l4_4_data;

	if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */
		return 0;

	hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	l4_4_data = (__be32 *)hdr->buffer;

	/* L2TPv3 over IP with 'Session ID' */
	if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) {
		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);

		*l4_4_data = fltr->ip_data.l4_header;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
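/*
 * IP protocol 115 is L2TPv3; the 32-bit l4_header value (typically supplied
 * through ethtool's l4data option for ip4/ip6 flows) is used verbatim as the
 * L2TPv3 Session ID to match on.
 */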
/**
 * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the Ethernet protocol header is set successfully
 */
static int
iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

	if (fltr->eth_mask.etype == htons(U16_MAX)) {
		if (fltr->eth_data.etype == htons(ETH_P_IP) ||
		    fltr->eth_data.etype == htons(ETH_P_IPV6))
			return -EOPNOTSUPP;

		ehdr->h_proto = fltr->eth_data.etype;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
	}

	return 0;
}
/**
 * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns 0 if the add Flow Director virtchnl message is filled successfully
 */
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg;
	struct virtchnl_proto_hdrs *proto_hdrs;
	int err;

	proto_hdrs = &vc_msg->rule_cfg.proto_hdrs;

	err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */
	if (err)
		return err;

	switch (fltr->flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_UDP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_SCTP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_AH:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_ESP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_OTHER:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_TCP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_UDP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_AH:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_ESP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_NON_IP_L2:
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		return err;

	vc_msg->vsi_id = adapter->vsi.id;
	vc_msg->rule_cfg.action_set.count = 1;
	vc_msg->rule_cfg.action_set.actions[0].type = fltr->action;
	vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index;

	return 0;
}
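/*
 * Note on the switch above: each flow type programs at most two protocol
 * headers after the Ethernet header (L3 plus an optional L4/flex header);
 * the bitwise OR of the two fill results collapses any error into a nonzero
 * value that is checked before the action and queue index are filled in.
 */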
/**
 * iavf_fdir_flow_proto_name - get the flow protocol name
 * @flow_type: Flow Director filter flow type
 */
static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type)
{
	switch (flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
	case IAVF_FDIR_FLOW_IPV6_TCP:
		return "TCP";
	case IAVF_FDIR_FLOW_IPV4_UDP:
	case IAVF_FDIR_FLOW_IPV6_UDP:
		return "UDP";
	case IAVF_FDIR_FLOW_IPV4_SCTP:
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		return "SCTP";
	case IAVF_FDIR_FLOW_IPV4_AH:
	case IAVF_FDIR_FLOW_IPV6_AH:
		return "AH";
	case IAVF_FDIR_FLOW_IPV4_ESP:
	case IAVF_FDIR_FLOW_IPV6_ESP:
		return "ESP";
	case IAVF_FDIR_FLOW_IPV4_OTHER:
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		return "Other";
	case IAVF_FDIR_FLOW_NON_IP_L2:
		return "Ethernet";
	default:
		return NULL;
	}
}
/**
 * iavf_print_fdir_fltr
 * @adapter: adapter structure
 * @fltr: Flow Director filter to print
 *
 * Print the Flow Director filter
 */
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type);

	if (!proto)
		return;

	switch (fltr->flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
	case IAVF_FDIR_FLOW_IPV4_UDP:
	case IAVF_FDIR_FLOW_IPV4_SCTP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 proto,
			 ntohs(fltr->ip_data.dst_port),
			 ntohs(fltr->ip_data.src_port));
		break;
	case IAVF_FDIR_FLOW_IPV4_AH:
	case IAVF_FDIR_FLOW_IPV4_ESP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 proto,
			 ntohl(fltr->ip_data.spi));
		break;
	case IAVF_FDIR_FLOW_IPV4_OTHER:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 fltr->ip_data.proto,
			 ntohl(fltr->ip_data.l4_header));
		break;
	case IAVF_FDIR_FLOW_IPV6_TCP:
	case IAVF_FDIR_FLOW_IPV6_UDP:
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 proto,
			 ntohs(fltr->ip_data.dst_port),
			 ntohs(fltr->ip_data.src_port));
		break;
	case IAVF_FDIR_FLOW_IPV6_AH:
	case IAVF_FDIR_FLOW_IPV6_ESP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 proto,
			 ntohl(fltr->ip_data.spi));
		break;
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 fltr->ip_data.proto,
			 ntohl(fltr->ip_data.l4_header));
		break;
	case IAVF_FDIR_FLOW_NON_IP_L2:
		dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n",
			 fltr->loc,
			 ntohs(fltr->eth_data.etype));
		break;
	default:
		break;
	}
}
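/*
 * Sample log line (illustrative): "Rule ID: 12 dst_ip: 10.0.0.2 src_ip
 * 10.0.0.1 TCP: dst_port 80 src_port 0".
 */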
/**
 * iavf_fdir_is_dup_fltr - test if filter is already in list
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns true if the filter is found in the list
 */
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	struct iavf_fdir_fltr *tmp;
	bool ret = false;

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
		if (iavf_is_raw_fdir(fltr))
			break;

		if (tmp->flow_type != fltr->flow_type)
			continue;

		if (!memcmp(&tmp->eth_data, &fltr->eth_data,
			    sizeof(fltr->eth_data)) &&
		    !memcmp(&tmp->ip_data, &fltr->ip_data,
			    sizeof(fltr->ip_data)) &&
		    !memcmp(&tmp->ext_data, &fltr->ext_data,
			    sizeof(fltr->ext_data))) {
			ret = true;
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	return ret;
}
/**
 * iavf_find_fdir_fltr - find FDIR filter
 * @adapter: pointer to the VF adapter structure
 * @is_raw: true for a raw (tc u32) filter, false for an ethtool filter
 * @data: data to ID the filter, type dependent
 *
 * Returns: pointer to Flow Director filter if found or NULL. Lock must be held.
 */
struct iavf_fdir_fltr *iavf_find_fdir_fltr(struct iavf_adapter *adapter,
					   bool is_raw, u32 data)
{
	struct iavf_fdir_fltr *rule;

	list_for_each_entry(rule, &adapter->fdir_list_head, list) {
		if ((is_raw && rule->cls_u32_handle == data) ||
		    (!is_raw && rule->loc == data))
			return rule;
	}

	return NULL;
}
/**
 * iavf_fdir_add_fltr - add a new node to the flow director filter list
 * @adapter: pointer to the VF adapter structure
 * @fltr: filter node to add to structure
 *
 * Return: 0 on success or negative errno on failure.
 */
int iavf_fdir_add_fltr(struct iavf_adapter *adapter,
		       struct iavf_fdir_fltr *fltr)
{
	struct iavf_fdir_fltr *rule, *parent = NULL;

	spin_lock_bh(&adapter->fdir_fltr_lock);
	if (iavf_fdir_max_reached(adapter)) {
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		dev_err(&adapter->pdev->dev,
			"Unable to add Flow Director filter (limit (%u) reached)\n",
			IAVF_MAX_FDIR_FILTERS);
		return -ENOSPC;
	}

	list_for_each_entry(rule, &adapter->fdir_list_head, list) {
		if (iavf_is_raw_fdir(fltr))
			break;

		if (rule->loc >= fltr->loc)
			break;
		parent = rule;
	}

	if (parent)
		list_add(&fltr->list, &parent->list);
	else
		list_add(&fltr->list, &adapter->fdir_list_head);

	iavf_inc_fdir_active_fltr(adapter, fltr);

	if (adapter->link_up)
		fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
	else
		fltr->state = IAVF_FDIR_FLTR_INACTIVE;
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	if (adapter->link_up)
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_FDIR_FILTER);

	return 0;
}
/**
 * iavf_fdir_del_fltr - delete a flow director filter from the list
 * @adapter: pointer to the VF adapter structure
 * @is_raw: true for a raw (tc u32) filter, false for an ethtool filter
 * @data: data to ID the filter, type dependent
 *
 * Return: 0 on success or negative errno on failure.
 */
int iavf_fdir_del_fltr(struct iavf_adapter *adapter, bool is_raw, u32 data)
{
	struct iavf_fdir_fltr *fltr = NULL;
	int err = 0;

	spin_lock_bh(&adapter->fdir_fltr_lock);
	fltr = iavf_find_fdir_fltr(adapter, is_raw, data);

	if (fltr) {
		if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
			fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
		} else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) {
			list_del(&fltr->list);
			iavf_dec_fdir_active_fltr(adapter, fltr);
			kfree(fltr);
			fltr = NULL;
		} else {
			err = -EBUSY;
		}
	} else if (adapter->fdir_active_fltr) {
		err = -EINVAL;
	}

	if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_FDIR_FILTER);

	spin_unlock_bh(&adapter->fdir_fltr_lock);
	return err;
}