/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */

#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_

#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff

/* Generate flow hash field from flow field type(s) */
#define ICE_FLOW_HASH_ETH \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA))
#define ICE_FLOW_HASH_IPV4 \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
#define ICE_FLOW_HASH_IPV6 \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
#define ICE_FLOW_HASH_TCP_PORT \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
#define ICE_FLOW_HASH_UDP_PORT \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
#define ICE_FLOW_HASH_SCTP_PORT \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
	 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))

#define ICE_HASH_INVALID	0
#define ICE_HASH_TCP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_TCP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
#define ICE_HASH_UDP_IPV4	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
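
/* Illustrative note (not upstream code): each ICE_HASH_* value above is just a
 * 64-bit bitmask of ICE_FLOW_FIELD_IDX_* bits, suitable for the hashed_flds
 * argument of the RSS helpers declared at the end of this header. For example,
 * ICE_HASH_TCP_IPV4 expands to the four field bits below; "hash" is only a
 * sketch variable name, not part of the driver API.
 *
 *	u64 hash = ICE_HASH_TCP_IPV4;
 *	// == BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *	//    BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
 *	//    BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
 *	//    BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
 */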

#define ICE_FLOW_HASH_GTP_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))

#define ICE_FLOW_HASH_GTP_IPV4_TEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
#define ICE_FLOW_HASH_GTP_IPV6_TEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)

#define ICE_FLOW_HASH_GTP_U_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))

#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)

#define ICE_FLOW_HASH_GTP_U_EH_TEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))

#define ICE_FLOW_HASH_GTP_U_EH_QFI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))

#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
	 ICE_FLOW_HASH_GTP_U_EH_QFI)
#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
	 ICE_FLOW_HASH_GTP_U_EH_QFI)

#define ICE_FLOW_HASH_PPPOE_SESS_ID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))

#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
	(ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_TCP_ID \
	(ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_UDP_ID \
	(ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)

#define ICE_FLOW_HASH_PFCP_SEID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)

#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)

#define ICE_FLOW_HASH_ESP_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
#define ICE_FLOW_HASH_ESP_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
#define ICE_FLOW_HASH_ESP_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)

#define ICE_FLOW_HASH_AH_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
#define ICE_FLOW_HASH_AH_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
#define ICE_FLOW_HASH_AH_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)

#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
	(BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
	(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
	(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)

/* Protocol header fields within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group. Each logical group of
 * headers either encapsulates or is encapsulated by tunneling/encapsulation
 * protocols used for network virtualization, such as GRE or VXLAN.
 */
enum ice_flow_seg_hdr {
	ICE_FLOW_SEG_HDR_NONE		= 0x00000000,
	ICE_FLOW_SEG_HDR_ETH		= 0x00000001,
	ICE_FLOW_SEG_HDR_VLAN		= 0x00000002,
	ICE_FLOW_SEG_HDR_IPV4		= 0x00000004,
	ICE_FLOW_SEG_HDR_IPV6		= 0x00000008,
	ICE_FLOW_SEG_HDR_ARP		= 0x00000010,
	ICE_FLOW_SEG_HDR_ICMP		= 0x00000020,
	ICE_FLOW_SEG_HDR_TCP		= 0x00000040,
	ICE_FLOW_SEG_HDR_UDP		= 0x00000080,
	ICE_FLOW_SEG_HDR_SCTP		= 0x00000100,
	ICE_FLOW_SEG_HDR_GRE		= 0x00000200,
	ICE_FLOW_SEG_HDR_GTPC		= 0x00000400,
	ICE_FLOW_SEG_HDR_GTPC_TEID	= 0x00000800,
	ICE_FLOW_SEG_HDR_GTPU_IP	= 0x00001000,
	ICE_FLOW_SEG_HDR_GTPU_EH	= 0x00002000,
	ICE_FLOW_SEG_HDR_GTPU_DWN	= 0x00004000,
	ICE_FLOW_SEG_HDR_GTPU_UP	= 0x00008000,
	ICE_FLOW_SEG_HDR_PPPOE		= 0x00010000,
	ICE_FLOW_SEG_HDR_PFCP_NODE	= 0x00020000,
	ICE_FLOW_SEG_HDR_PFCP_SESSION	= 0x00040000,
	ICE_FLOW_SEG_HDR_L2TPV3		= 0x00080000,
	ICE_FLOW_SEG_HDR_ESP		= 0x00100000,
	ICE_FLOW_SEG_HDR_AH		= 0x00200000,
	ICE_FLOW_SEG_HDR_NAT_T_ESP	= 0x00400000,
	ICE_FLOW_SEG_HDR_ETH_NON_IP	= 0x00800000,
	/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
	 * ICE_FLOW_SEG_HDR_IPV6 that also includes the IPV4 "other" PTYPEs.
	 */
	ICE_FLOW_SEG_HDR_IPV_OTHER	= 0x20000000,
};

/* These segments all have the same PTYPEs, but are otherwise distinguished by
 * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
 *
 *                              gtp_eh_pdu    gtp_eh_pdu_link
 * ICE_FLOW_SEG_HDR_GTPU_IP         0               0
 * ICE_FLOW_SEG_HDR_GTPU_EH         1               don't care
 * ICE_FLOW_SEG_HDR_GTPU_DWN        1               0
 * ICE_FLOW_SEG_HDR_GTPU_UP         1               1
 */
#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
			       ICE_FLOW_SEG_HDR_GTPU_EH | \
			       ICE_FLOW_SEG_HDR_GTPU_DWN | \
			       ICE_FLOW_SEG_HDR_GTPU_UP)
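
/* Illustrative sketch (not upstream code): to hash GTP-U traffic with RSS, the
 * segment-header bits above are passed as the addl_hdrs argument of
 * ice_add_rss_cfg() (declared near the end of this header) together with a
 * matching ICE_FLOW_HASH_* field mask. The hw/vsi_handle/status names below
 * are placeholders for the caller's context, not driver API.
 *
 *	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_GTP_U_IPV4_EH,
 *				 ICE_FLOW_SEG_HDR_GTPU_EH |
 *				 ICE_FLOW_SEG_HDR_IPV4);
 */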

#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
			       ICE_FLOW_SEG_HDR_PFCP_SESSION)

enum ice_flow_field {
	/* L2 */
	ICE_FLOW_FIELD_IDX_ETH_DA,
	ICE_FLOW_FIELD_IDX_ETH_SA,
	ICE_FLOW_FIELD_IDX_S_VLAN,
	ICE_FLOW_FIELD_IDX_C_VLAN,
	ICE_FLOW_FIELD_IDX_ETH_TYPE,
	/* L3 */
	ICE_FLOW_FIELD_IDX_IPV4_DSCP,
	ICE_FLOW_FIELD_IDX_IPV6_DSCP,
	ICE_FLOW_FIELD_IDX_IPV4_TTL,
	ICE_FLOW_FIELD_IDX_IPV4_PROT,
	ICE_FLOW_FIELD_IDX_IPV6_TTL,
	ICE_FLOW_FIELD_IDX_IPV6_PROT,
	ICE_FLOW_FIELD_IDX_IPV4_SA,
	ICE_FLOW_FIELD_IDX_IPV4_DA,
	ICE_FLOW_FIELD_IDX_IPV6_SA,
	ICE_FLOW_FIELD_IDX_IPV6_DA,
	/* L4 */
	ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
	ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
	ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
	ICE_FLOW_FIELD_IDX_TCP_FLAGS,
	/* ARP */
	ICE_FLOW_FIELD_IDX_ARP_SIP,
	ICE_FLOW_FIELD_IDX_ARP_DIP,
	ICE_FLOW_FIELD_IDX_ARP_SHA,
	ICE_FLOW_FIELD_IDX_ARP_DHA,
	ICE_FLOW_FIELD_IDX_ARP_OP,
	/* ICMP */
	ICE_FLOW_FIELD_IDX_ICMP_TYPE,
	ICE_FLOW_FIELD_IDX_ICMP_CODE,
	/* GRE */
	ICE_FLOW_FIELD_IDX_GRE_KEYID,
	/* GTPC */
	ICE_FLOW_FIELD_IDX_GTPC_TEID,
	/* GTPU_IP */
	ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
	/* GTPU_EH */
	ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
	ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
	/* GTPU_UP */
	ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
	/* GTPU_DWN */
	ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
	/* PPPoE */
	ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
	/* PFCP */
	ICE_FLOW_FIELD_IDX_PFCP_SEID,
	/* L2TPv3 */
	ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
	/* ESP */
	ICE_FLOW_FIELD_IDX_ESP_SPI,
	/* AH */
	ICE_FLOW_FIELD_IDX_AH_SPI,
	/* NAT_T ESP */
	ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
	/* The total number of enums must not exceed 64 */
	ICE_FLOW_FIELD_IDX_MAX
};
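
/* Illustrative note (not upstream code): these indexes are used as bit
 * positions in a 64-bit field bitmap (hence the "must not exceed 64"
 * constraint above), which is why the ICE_FLOW_HASH_* macros at the top of
 * this header are built with BIT_ULL(). A minimal sketch; "flds" is only a
 * placeholder name:
 *
 *	u64 flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
 *		   BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);	// == ICE_FLOW_HASH_IPV4
 */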

/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
	/* Values 0 - 28 are reserved for future use */
	ICE_AVF_FLOW_FIELD_INVALID = 0,
	ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
	ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
	ICE_AVF_FLOW_FIELD_IPV4_UDP,
	ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
	ICE_AVF_FLOW_FIELD_IPV4_TCP,
	ICE_AVF_FLOW_FIELD_IPV4_SCTP,
	ICE_AVF_FLOW_FIELD_IPV4_OTHER,
	ICE_AVF_FLOW_FIELD_FRAG_IPV4,
	/* Values 37-38 are reserved */
	ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
	ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
	ICE_AVF_FLOW_FIELD_IPV6_UDP,
	ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
	ICE_AVF_FLOW_FIELD_IPV6_TCP,
	ICE_AVF_FLOW_FIELD_IPV6_SCTP,
	ICE_AVF_FLOW_FIELD_IPV6_OTHER,
	ICE_AVF_FLOW_FIELD_FRAG_IPV6,
	ICE_AVF_FLOW_FIELD_RSVD47,
	ICE_AVF_FLOW_FIELD_FCOE_OX,
	ICE_AVF_FLOW_FIELD_FCOE_RX,
	ICE_AVF_FLOW_FIELD_FCOE_OTHER,
	/* Values 51-62 are reserved */
	ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
	ICE_AVF_FLOW_FIELD_MAX
};

/* Supported RSS offloads. This macro is defined to support the
 * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops; the PF driver sends the RSS hardware
 * capabilities to the caller of these ops.
 */
#define ICE_DEFAULT_RSS_HENA ( \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
	BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
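
/* Illustrative sketch (not upstream code): these AVF hash bits are what
 * ice_add_avf_rss_cfg() (declared near the end of this header) consumes as its
 * hashed_flds argument, for example when programming a VF's default RSS set.
 * The hw/vsi_handle/status names are placeholders for the caller's context.
 *
 *	status = ice_add_avf_rss_cfg(hw, vsi_handle, ICE_DEFAULT_RSS_HENA);
 */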

enum ice_flow_dir {
	ICE_FLOW_RX = 0x02,
};

enum ice_flow_priority {
	ICE_FLOW_PRIO_LOW,
	ICE_FLOW_PRIO_NORMAL,
	ICE_FLOW_PRIO_HIGH
};

#define ICE_FLOW_SEG_MAX		2
#define ICE_FLOW_SEG_RAW_FLD_MAX	2
#define ICE_FLOW_FV_EXTRACT_SZ		2

#define ICE_FLOW_SET_HDRS(seg, val)	((seg)->hdrs |= (u32)(val))
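
/* Illustrative sketch (not upstream code): ICE_FLOW_SET_HDRS() simply ORs
 * protocol-header bits into a segment's hdrs mask before the segment is handed
 * to ice_flow_add_prof(). "seg" below is a caller-owned ice_flow_seg_info, not
 * driver API.
 *
 *	struct ice_flow_seg_info seg = {};
 *
 *	ICE_FLOW_SET_HDRS(&seg, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 */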

struct ice_flow_seg_xtrct {
	u8 prot_id;	/* Protocol ID of extracted header field */
	u16 off;	/* Starting offset of the field in header in bytes */
	u8 idx;		/* Index of FV entry used */
	u8 disp;	/* Displacement of field in bits from start of FV entry */
	u16 mask;	/* Mask for field */
};

enum ice_flow_fld_match_type {
	ICE_FLOW_FLD_TYPE_REG,		/* Value, mask */
	ICE_FLOW_FLD_TYPE_RANGE,	/* Value, mask, last (upper bound) */
	ICE_FLOW_FLD_TYPE_PREFIX,	/* IP address, prefix, size of prefix */
	ICE_FLOW_FLD_TYPE_SIZE,		/* Value, mask, size of match */
};

struct ice_flow_fld_loc {
	/* Describe offsets of field information relative to the beginning of
	 * input buffer provided when adding flow entries.
	 */
	u16 val;	/* Offset where the value is located */
	u16 mask;	/* Offset where the mask/prefix value is located */
	u16 last;	/* Length or offset where the upper value is located */
};
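
/* Illustrative sketch (not upstream code): the val/mask/last offsets refer to
 * positions inside the caller's match-data buffer, i.e. the "data" argument
 * later passed to ice_flow_add_entry(). The struct below is a hypothetical
 * caller-side layout used only to show how ice_flow_set_fld() ties a field to
 * those offsets; it is not part of the driver.
 *
 *	struct my_ipv4_match {		// hypothetical caller buffer
 *		__be32 dst_ip;
 *		__be32 dst_ip_mask;
 *	};
 *
 *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_DA,
 *			 offsetof(struct my_ipv4_match, dst_ip),
 *			 offsetof(struct my_ipv4_match, dst_ip_mask),
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 */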

struct ice_flow_fld_info {
	enum ice_flow_fld_match_type type;
	/* Location where to retrieve data from an input buffer */
	struct ice_flow_fld_loc src;
	/* Location where to put the data into the final entry buffer */
	struct ice_flow_fld_loc entry;
	struct ice_flow_seg_xtrct xtrct;
};

struct ice_flow_seg_fld_raw {
	struct ice_flow_fld_info info;
	u16 off;	/* Offset from the start of the segment */
};

struct ice_flow_seg_info {
	u32 hdrs;	/* Bitmask indicating protocol headers present */
	u64 match;	/* Bitmask indicating header fields to be matched */
	u64 range;	/* Bitmask indicating header fields matched as ranges */

	struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];

	u8 raws_cnt;	/* Number of raw fields to be matched */
	struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};

/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
	struct list_head l_entry;

	struct ice_flow_prof *prof;
	/* Flow entry's content */
	void *entry;
	enum ice_flow_priority priority;
};

#define ICE_FLOW_ENTRY_HNDL(e)	((u64)(uintptr_t)(e))
#define ICE_FLOW_ENTRY_PTR(h)	((struct ice_flow_entry *)(uintptr_t)(h))

struct ice_flow_prof {
	struct list_head l_entry;

	enum ice_flow_dir dir;

	/* Keep track of flow entries associated with this flow profile */
	struct mutex entries_lock;
	struct list_head entries;

	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];

	/* software VSI handles referenced by this flow profile */
	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
};

struct ice_rss_cfg {
	struct list_head l_entry;
	/* bitmap of VSIs added to the RSS entry */
	DECLARE_BITMAP(vsis, ICE_MAX_VSI);
};

enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
		  u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
		  struct ice_flow_prof **prof);
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
		   u64 entry_id, u16 vsi, enum ice_flow_priority prio,
		   void *data, u64 *entry_h);
enum ice_status
ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
		 u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
		     u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs);
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
		u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
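
/* Illustrative end-to-end sketch (not upstream code) of how the declarations
 * above fit together: build a segment, bind a field (here with
 * ICE_FLOW_FLD_OFF_INVAL, i.e. no caller-supplied value, as the RSS
 * configuration path does; see the offsetof() sketch earlier for value
 * matching), create a profile, then add an entry. The lower-case names
 * (hw, blk, vsi_handle, prof_id, entry_id, buf, status) are placeholders for
 * the caller's context; blk is an enum ice_block value defined elsewhere in
 * the driver.
 *
 *	struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX] = {};
 *	struct ice_flow_prof *prof;
 *	u64 entry_h;
 *
 *	ICE_FLOW_SET_HDRS(&segs[0], ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
 *	ice_flow_set_fld(&segs[0], ICE_FLOW_FIELD_IDX_IPV4_DA,
 *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
 *			 ICE_FLOW_FLD_OFF_INVAL, false);
 *	status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX, prof_id, segs, 1, &prof);
 *	if (!status)
 *		status = ice_flow_add_entry(hw, blk, prof_id, entry_id,
 *					    vsi_handle, ICE_FLOW_PRIO_NORMAL,
 *					    buf, &entry_h);
 */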

#endif /* _ICE_FLOW_H_ */