// SPDX-License-Identifier: GPL-2.0+
/* Microchip VCAP TC
 *
 * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/flow_offload.h>
#include <net/ipv6.h>
#include <net/tcp.h>

#include "vcap_api_client.h"
#include "vcap_tc.h"

enum vcap_is2_arp_opcode {
	VCAP_IS2_ARP_REQUEST,
	VCAP_IS2_ARP_REPLY,
	VCAP_IS2_RARP_REQUEST,
	VCAP_IS2_RARP_REPLY,
};

enum vcap_arp_opcode {
	VCAP_ARP_OP_RESERVED,
	VCAP_ARP_OP_REQUEST,
	VCAP_ARP_OP_REPLY,
};
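
/* Parse the flower ETH_ADDRS match and add any non-zero masked source and
 * destination MAC address as L2 SMAC/DMAC keys on the VCAP rule.
 */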
int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st)
{
	enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
	enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
	struct flow_match_eth_addrs match;
	struct vcap_u48_key smac, dmac;
	int err = 0;

	flow_rule_match_eth_addrs(st->frule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
		vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
		if (err)
			goto out;
	}

	if (!is_zero_ether_addr(match.mask->dst)) {
		vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
		vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
		err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ethaddr_usage);
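
/* Parse the flower IPV4_ADDRS match and add masked source/destination
 * addresses as IP4 SIP/DIP keys when the rule matches on ETH_P_IP.
 */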
int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IP) {
		struct flow_match_ipv4_addrs mt;

		flow_rule_match_ipv4_addrs(st->frule, &mt);
		if (mt.mask->src) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_SIP,
						    be32_to_cpu(mt.key->src),
						    be32_to_cpu(mt.mask->src));
			if (err)
				goto out;
		}
		if (mt.mask->dst) {
			err = vcap_rule_add_key_u32(st->vrule,
						    VCAP_KF_L3_IP4_DIP,
						    be32_to_cpu(mt.key->dst),
						    be32_to_cpu(mt.mask->dst));
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv4_usage);
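
/* Parse the flower IPV6_ADDRS match and add masked source/destination
 * addresses as IP6 SIP/DIP keys when the rule matches on ETH_P_IPV6.
 */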
int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st)
{
	int err = 0;

	if (st->l3_proto == ETH_P_IPV6) {
		struct flow_match_ipv6_addrs mt;
		struct vcap_u128_key sip;
		struct vcap_u128_key dip;

		flow_rule_match_ipv6_addrs(st->frule, &mt);
		/* Check if address masks are non-zero */
		if (!ipv6_addr_any(&mt.mask->src)) {
			vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
			vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_SIP, &sip);
			if (err)
				goto out;
		}
		if (!ipv6_addr_any(&mt.mask->dst)) {
			vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
			vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
			err = vcap_rule_add_key_u128(st->vrule,
						     VCAP_KF_L3_IP6_DIP, &dip);
			if (err)
				goto out;
		}
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv6_usage);
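
/* Parse the flower PORTS match and add masked L4 source/destination port
 * numbers as SPORT/DPORT keys.
 */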
int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_ports mt;
	u16 value, mask;
	int err = 0;

	flow_rule_match_ports(st->frule, &mt);

	if (mt.mask->src) {
		value = be16_to_cpu(mt.key->src);
		mask = be16_to_cpu(mt.mask->src);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
					    mask);
		if (err)
			goto out;
	}

	if (mt.mask->dst) {
		value = be16_to_cpu(mt.key->dst);
		mask = be16_to_cpu(mt.mask->dst);
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
					    mask);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_PORTS);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_portnum_usage);
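
/* Parse the flower CVLAN (inner tag) match and add masked VLAN id and
 * priority keys; the VID1/PCP1 key fields are selected when the tag TPID
 * is ETH_P_8021Q, otherwise VID0/PCP0 are used.
 */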
int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
{
	enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
	enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
	struct flow_match_vlan mt;
	u16 tpid;
	int err = 0;

	flow_rule_match_cvlan(st->frule, &mt);

	tpid = be16_to_cpu(mt.key->vlan_tpid);

	if (tpid == ETH_P_8021Q) {
		vid_key = VCAP_KF_8021Q_VID1;
		pcp_key = VCAP_KF_8021Q_PCP1;
	}

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_cvlan_usage);
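
/* Parse the flower VLAN (outer tag) match and add masked VLAN id and
 * priority keys using the caller-selected key fields; a matched TPID is
 * saved in the parse state for the caller.
 */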
int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
				      enum vcap_key_field vid_key,
				      enum vcap_key_field pcp_key)
{
	struct flow_match_vlan mt;
	int err = 0;

	flow_rule_match_vlan(st->frule, &mt);

	if (mt.mask->vlan_id) {
		err = vcap_rule_add_key_u32(st->vrule, vid_key,
					    mt.key->vlan_id,
					    mt.mask->vlan_id);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_priority) {
		err = vcap_rule_add_key_u32(st->vrule, pcp_key,
					    mt.key->vlan_priority,
					    mt.mask->vlan_priority);
		if (err)
			goto out;
	}

	if (mt.mask->vlan_tpid)
		st->tpid = be16_to_cpu(mt.key->vlan_tpid);

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_vlan_usage);
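
/* Parse the flower TCP match and add one VCAP bit key per TCP flag
 * (FIN/SYN/RST/PSH/ACK/URG) that is covered by the mask.
 */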
int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_tcp mt;
	u16 tcp_flags_mask;
	u16 tcp_flags_key;
	enum vcap_bit val;
	int err = 0;

	flow_rule_match_tcp(st->frule, &mt);
	tcp_flags_key = be16_to_cpu(mt.key->flags);
	tcp_flags_mask = be16_to_cpu(mt.mask->flags);

	if (tcp_flags_mask & TCPHDR_FIN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_FIN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_SYN) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_SYN)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_RST) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_RST)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_PSH) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_PSH)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_ACK) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_ACK)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
		if (err)
			goto out;
	}

	if (tcp_flags_mask & TCPHDR_URG) {
		val = VCAP_BIT_0;
		if (tcp_flags_key & TCPHDR_URG)
			val = VCAP_BIT_1;
		err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_TCP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_tcp_usage);
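
/* Parse the flower ARP match: translate the ARP/RARP opcode to the IS2
 * encoding and add masked sender/target IPv4 addresses. Hardware address
 * matching is rejected since the IS2 ARP keyset does not support it.
 */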
int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_arp mt;
	u16 value, mask;
	u32 ipval, ipmsk;
	int err;

	flow_rule_match_arp(st->frule, &mt);

	if (mt.mask->op) {
		mask = 0x3;
		if (st->l3_proto == ETH_P_ARP) {
			value = mt.key->op == VCAP_ARP_OP_REQUEST ?
					VCAP_IS2_ARP_REQUEST :
					VCAP_IS2_ARP_REPLY;
		} else {
			value = mt.key->op == VCAP_ARP_OP_REQUEST ?
					VCAP_IS2_RARP_REQUEST :
					VCAP_IS2_RARP_REPLY;
		}
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
					    value, mask);
		if (err)
			goto out;
	}

	/* The IS2 ARP keyset does not support ARP hardware addresses */
	if (!is_zero_ether_addr(mt.mask->sha) ||
	    !is_zero_ether_addr(mt.mask->tha)) {
		err = -EINVAL;
		goto out;
	}

	if (mt.mask->sip) {
		ipval = be32_to_cpu((__force __be32)mt.key->sip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	if (mt.mask->tip) {
		ipval = be32_to_cpu((__force __be32)mt.key->tip);
		ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);

		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
					    ipval, ipmsk);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_ARP);

	return 0;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_arp_usage);
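
/* Parse the flower IP match and add the masked TOS field as an L3_TOS key. */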
int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st)
{
	struct flow_match_ip mt;
	int err = 0;

	flow_rule_match_ip(st->frule, &mt);

	if (mt.mask->tos) {
		err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
					    mt.key->tos,
					    mt.mask->tos);
		if (err)
			goto out;
	}

	st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_IP);

	return err;

out:
	NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
	return err;
}
EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ip_usage);