1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
5 #include "ice_switch.h"
7 #define ICE_ETH_DA_OFFSET 0 /* dummy eth hdr: destination MAC offset */
8 #define ICE_ETH_ETHTYPE_OFFSET 12 /* dummy eth hdr: EtherType offset */
9 #define ICE_ETH_VLAN_TCI_OFFSET 14 /* dummy eth hdr: VLAN TCI offset */
10 #define ICE_MAX_VLAN_ID 0xFFF /* VLAN ID is a 12-bit field */
11 #define ICE_IPV6_ETHER_ID 0x86DD /* EtherType value for IPv6 */
13 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
14 * struct to configure any switch filter rules.
15 * {DA (6 bytes), SA (6 bytes),
16 * Ether type (2 bytes for header without VLAN tag) OR
17 * VLAN tag (4 bytes for header with VLAN tag) }
19 * A word on the hardcoded values:
20 * byte 0 = 0x2: to identify it as locally administered DA MAC
21 * byte 6 = 0x2: to identify it as locally administered SA MAC
22 * byte 12 = 0x81 & byte 13 = 0x00:
23 * In case of VLAN filter first two bytes defines ether type (0x8100)
24 * and remaining two bytes are placeholder for programming a given VLAN ID
25 * In case of Ether type filter it is treated as header without VLAN tag
26 * and bytes 12 and 13 are used to program a given Ether type instead
28 #define DUMMY_ETH_HDR_LEN 16
29 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
34 ICE_PKT_OUTER_IPV6 = BIT(0),
35 ICE_PKT_TUN_GTPC = BIT(1),
36 ICE_PKT_TUN_GTPU = BIT(2),
37 ICE_PKT_TUN_NVGRE = BIT(3),
38 ICE_PKT_TUN_UDP = BIT(4),
39 ICE_PKT_INNER_IPV6 = BIT(5),
40 ICE_PKT_INNER_TCP = BIT(6),
41 ICE_PKT_INNER_UDP = BIT(7),
42 ICE_PKT_GTP_NOPAY = BIT(8),
43 ICE_PKT_KMALLOC = BIT(9),
44 ICE_PKT_PPPOE = BIT(10),
45 ICE_PKT_L2TPV3 = BIT(11),
48 struct ice_dummy_pkt_offsets {
49 enum ice_protocol_type type;
50 u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
53 struct ice_dummy_pkt_profile {
54 const struct ice_dummy_pkt_offsets *offsets;
/* Declare the protocol-offset table for dummy packet "type"; each table is
 * terminated by an { ICE_PROTOCOL_LAST, 0 } entry (see struct
 * ice_dummy_pkt_offsets).
 */
61 #define ICE_DECLARE_PKT_OFFSETS(type) \
62 static const struct ice_dummy_pkt_offsets \
63 ice_dummy_##type##_packet_offsets[]

/* Declare the raw byte template for dummy packet "type" */
65 #define ICE_DECLARE_PKT_TEMPLATE(type) \
66 static const u8 ice_dummy_##type##_packet[]
68 #define ICE_PKT_PROFILE(type, m) { \
70 .pkt = ice_dummy_##type##_packet, \
71 .pkt_len = sizeof(ice_dummy_##type##_packet), \
72 .offsets = ice_dummy_##type##_packet_offsets, \
73 .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
76 ICE_DECLARE_PKT_OFFSETS(vlan) = {
77 { ICE_VLAN_OFOS, 12 },
80 ICE_DECLARE_PKT_TEMPLATE(vlan) = {
81 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
84 ICE_DECLARE_PKT_OFFSETS(qinq) = {
89 ICE_DECLARE_PKT_TEMPLATE(qinq) = {
90 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
91 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
94 ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
97 { ICE_IPV4_OFOS, 14 },
100 { ICE_ETYPE_IL, 54 },
103 { ICE_PROTOCOL_LAST, 0 },
106 ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
107 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
108 0x00, 0x00, 0x00, 0x00,
109 0x00, 0x00, 0x00, 0x00,
111 0x08, 0x00, /* ICE_ETYPE_OL 12 */
113 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
114 0x00, 0x00, 0x00, 0x00,
115 0x00, 0x2F, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
119 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
120 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
123 0x00, 0x00, 0x00, 0x00,
124 0x00, 0x00, 0x00, 0x00,
126 0x08, 0x00, /* ICE_ETYPE_IL 54 */
128 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
129 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x06, 0x00, 0x00,
131 0x00, 0x00, 0x00, 0x00,
132 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
135 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00,
137 0x50, 0x02, 0x20, 0x00,
138 0x00, 0x00, 0x00, 0x00
141 ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
143 { ICE_ETYPE_OL, 12 },
144 { ICE_IPV4_OFOS, 14 },
147 { ICE_ETYPE_IL, 54 },
149 { ICE_UDP_ILOS, 76 },
150 { ICE_PROTOCOL_LAST, 0 },
153 ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
154 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
155 0x00, 0x00, 0x00, 0x00,
156 0x00, 0x00, 0x00, 0x00,
158 0x08, 0x00, /* ICE_ETYPE_OL 12 */
160 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
161 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x2F, 0x00, 0x00,
163 0x00, 0x00, 0x00, 0x00,
164 0x00, 0x00, 0x00, 0x00,
166 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
167 0x00, 0x00, 0x00, 0x00,
169 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
170 0x00, 0x00, 0x00, 0x00,
171 0x00, 0x00, 0x00, 0x00,
173 0x08, 0x00, /* ICE_ETYPE_IL 54 */
175 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
176 0x00, 0x00, 0x00, 0x00,
177 0x00, 0x11, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x00, 0x00,
181 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
182 0x00, 0x08, 0x00, 0x00,
185 ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
187 { ICE_ETYPE_OL, 12 },
188 { ICE_IPV4_OFOS, 14 },
192 { ICE_VXLAN_GPE, 42 },
194 { ICE_ETYPE_IL, 62 },
197 { ICE_PROTOCOL_LAST, 0 },
200 ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
201 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
202 0x00, 0x00, 0x00, 0x00,
203 0x00, 0x00, 0x00, 0x00,
205 0x08, 0x00, /* ICE_ETYPE_OL 12 */
207 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
208 0x00, 0x01, 0x00, 0x00,
209 0x40, 0x11, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00,
211 0x00, 0x00, 0x00, 0x00,
213 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
214 0x00, 0x46, 0x00, 0x00,
216 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
217 0x00, 0x00, 0x00, 0x00,
219 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00,
223 0x08, 0x00, /* ICE_ETYPE_IL 62 */
225 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
226 0x00, 0x01, 0x00, 0x00,
227 0x40, 0x06, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
229 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
232 0x00, 0x00, 0x00, 0x00,
233 0x00, 0x00, 0x00, 0x00,
234 0x50, 0x02, 0x20, 0x00,
235 0x00, 0x00, 0x00, 0x00
238 ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
240 { ICE_ETYPE_OL, 12 },
241 { ICE_IPV4_OFOS, 14 },
245 { ICE_VXLAN_GPE, 42 },
247 { ICE_ETYPE_IL, 62 },
249 { ICE_UDP_ILOS, 84 },
250 { ICE_PROTOCOL_LAST, 0 },
253 ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
254 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
255 0x00, 0x00, 0x00, 0x00,
256 0x00, 0x00, 0x00, 0x00,
258 0x08, 0x00, /* ICE_ETYPE_OL 12 */
260 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
261 0x00, 0x01, 0x00, 0x00,
262 0x00, 0x11, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
267 0x00, 0x3a, 0x00, 0x00,
269 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
270 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
273 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00,
276 0x08, 0x00, /* ICE_ETYPE_IL 62 */
278 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
279 0x00, 0x01, 0x00, 0x00,
280 0x00, 0x11, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
285 0x00, 0x08, 0x00, 0x00,
288 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
290 { ICE_ETYPE_OL, 12 },
291 { ICE_IPV4_OFOS, 14 },
294 { ICE_ETYPE_IL, 54 },
297 { ICE_PROTOCOL_LAST, 0 },
300 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
301 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
302 0x00, 0x00, 0x00, 0x00,
303 0x00, 0x00, 0x00, 0x00,
305 0x08, 0x00, /* ICE_ETYPE_OL 12 */
307 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
308 0x00, 0x00, 0x00, 0x00,
309 0x00, 0x2F, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00,
313 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
314 0x00, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
317 0x00, 0x00, 0x00, 0x00,
318 0x00, 0x00, 0x00, 0x00,
320 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
322 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
323 0x00, 0x08, 0x06, 0x40,
324 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00,
331 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
334 0x00, 0x00, 0x00, 0x00,
335 0x00, 0x00, 0x00, 0x00,
336 0x50, 0x02, 0x20, 0x00,
337 0x00, 0x00, 0x00, 0x00
340 ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
342 { ICE_ETYPE_OL, 12 },
343 { ICE_IPV4_OFOS, 14 },
346 { ICE_ETYPE_IL, 54 },
348 { ICE_UDP_ILOS, 96 },
349 { ICE_PROTOCOL_LAST, 0 },
352 ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
353 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
354 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00,
357 0x08, 0x00, /* ICE_ETYPE_OL 12 */
359 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
360 0x00, 0x00, 0x00, 0x00,
361 0x00, 0x2F, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00,
365 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
366 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
369 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00,
372 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
374 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
375 0x00, 0x08, 0x11, 0x40,
376 0x00, 0x00, 0x00, 0x00,
377 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00,
383 0x00, 0x00, 0x00, 0x00,
385 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
386 0x00, 0x08, 0x00, 0x00,
389 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
391 { ICE_ETYPE_OL, 12 },
392 { ICE_IPV4_OFOS, 14 },
396 { ICE_VXLAN_GPE, 42 },
398 { ICE_ETYPE_IL, 62 },
401 { ICE_PROTOCOL_LAST, 0 },
404 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
405 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
406 0x00, 0x00, 0x00, 0x00,
407 0x00, 0x00, 0x00, 0x00,
409 0x08, 0x00, /* ICE_ETYPE_OL 12 */
411 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
412 0x00, 0x01, 0x00, 0x00,
413 0x40, 0x11, 0x00, 0x00,
414 0x00, 0x00, 0x00, 0x00,
415 0x00, 0x00, 0x00, 0x00,
417 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
418 0x00, 0x5a, 0x00, 0x00,
420 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
421 0x00, 0x00, 0x00, 0x00,
423 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
424 0x00, 0x00, 0x00, 0x00,
425 0x00, 0x00, 0x00, 0x00,
427 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
429 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
430 0x00, 0x08, 0x06, 0x40,
431 0x00, 0x00, 0x00, 0x00,
432 0x00, 0x00, 0x00, 0x00,
433 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00,
435 0x00, 0x00, 0x00, 0x00,
436 0x00, 0x00, 0x00, 0x00,
437 0x00, 0x00, 0x00, 0x00,
438 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
441 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00,
443 0x50, 0x02, 0x20, 0x00,
444 0x00, 0x00, 0x00, 0x00
447 ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
449 { ICE_ETYPE_OL, 12 },
450 { ICE_IPV4_OFOS, 14 },
454 { ICE_VXLAN_GPE, 42 },
456 { ICE_ETYPE_IL, 62 },
458 { ICE_UDP_ILOS, 104 },
459 { ICE_PROTOCOL_LAST, 0 },
462 ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
463 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
464 0x00, 0x00, 0x00, 0x00,
465 0x00, 0x00, 0x00, 0x00,
467 0x08, 0x00, /* ICE_ETYPE_OL 12 */
469 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
470 0x00, 0x01, 0x00, 0x00,
471 0x00, 0x11, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00,
473 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
476 0x00, 0x4e, 0x00, 0x00,
478 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
479 0x00, 0x00, 0x00, 0x00,
481 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
482 0x00, 0x00, 0x00, 0x00,
483 0x00, 0x00, 0x00, 0x00,
485 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
487 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
488 0x00, 0x08, 0x11, 0x40,
489 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00,
491 0x00, 0x00, 0x00, 0x00,
492 0x00, 0x00, 0x00, 0x00,
493 0x00, 0x00, 0x00, 0x00,
494 0x00, 0x00, 0x00, 0x00,
495 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
499 0x00, 0x08, 0x00, 0x00,
502 /* offset info for MAC + IPv4 + UDP dummy packet */
503 ICE_DECLARE_PKT_OFFSETS(udp) = {
505 { ICE_ETYPE_OL, 12 },
506 { ICE_IPV4_OFOS, 14 },
507 { ICE_UDP_ILOS, 34 },
508 { ICE_PROTOCOL_LAST, 0 },
511 /* Dummy packet for MAC + IPv4 + UDP */
512 ICE_DECLARE_PKT_TEMPLATE(udp) = {
513 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
514 0x00, 0x00, 0x00, 0x00,
515 0x00, 0x00, 0x00, 0x00,
517 0x08, 0x00, /* ICE_ETYPE_OL 12 */
519 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
520 0x00, 0x01, 0x00, 0x00,
521 0x00, 0x11, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00,
523 0x00, 0x00, 0x00, 0x00,
525 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
526 0x00, 0x08, 0x00, 0x00,
528 0x00, 0x00, /* 2 bytes for 4 byte alignment */
531 /* offset info for MAC + IPv4 + TCP dummy packet */
532 ICE_DECLARE_PKT_OFFSETS(tcp) = {
534 { ICE_ETYPE_OL, 12 },
535 { ICE_IPV4_OFOS, 14 },
537 { ICE_PROTOCOL_LAST, 0 },
540 /* Dummy packet for MAC + IPv4 + TCP */
541 ICE_DECLARE_PKT_TEMPLATE(tcp) = {
542 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
543 0x00, 0x00, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00,
546 0x08, 0x00, /* ICE_ETYPE_OL 12 */
548 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
549 0x00, 0x01, 0x00, 0x00,
550 0x00, 0x06, 0x00, 0x00,
551 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x00, 0x00,
554 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
555 0x00, 0x00, 0x00, 0x00,
556 0x00, 0x00, 0x00, 0x00,
557 0x50, 0x00, 0x00, 0x00,
558 0x00, 0x00, 0x00, 0x00,
560 0x00, 0x00, /* 2 bytes for 4 byte alignment */
563 ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
565 { ICE_ETYPE_OL, 12 },
566 { ICE_IPV6_OFOS, 14 },
568 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet bytes for MAC + IPv6 + TCP; offsets must stay in sync with
 * ice_dummy_tcp_ipv6_packet_offsets above.
 */
571 ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
572 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
573 0x00, 0x00, 0x00, 0x00,
574 0x00, 0x00, 0x00, 0x00,
576 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
578 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
579 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
580 0x00, 0x00, 0x00, 0x00,
581 0x00, 0x00, 0x00, 0x00,
582 0x00, 0x00, 0x00, 0x00,
583 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00,
585 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00,
587 0x00, 0x00, 0x00, 0x00,
589 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
590 0x00, 0x00, 0x00, 0x00,
591 0x00, 0x00, 0x00, 0x00,
592 0x50, 0x00, 0x00, 0x00,
593 0x00, 0x00, 0x00, 0x00,
595 0x00, 0x00, /* 2 bytes for 4 byte alignment */
599 ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
601 { ICE_ETYPE_OL, 12 },
602 { ICE_IPV6_OFOS, 14 },
603 { ICE_UDP_ILOS, 54 },
604 { ICE_PROTOCOL_LAST, 0 },
607 /* IPv6 + UDP dummy packet */
/* Dummy packet bytes for MAC + IPv6 + UDP; offsets must stay in sync with
 * ice_dummy_udp_ipv6_packet_offsets above.
 */
608 ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
609 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
610 0x00, 0x00, 0x00, 0x00,
611 0x00, 0x00, 0x00, 0x00,
613 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
615 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
616 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
617 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00,
619 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x00, 0x00, 0x00,
621 0x00, 0x00, 0x00, 0x00,
622 0x00, 0x00, 0x00, 0x00,
623 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x00, 0x00,
626 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
627 0x00, 0x10, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
630 0x00, 0x00, 0x00, 0x00,
632 0x00, 0x00, /* 2 bytes for 4 byte alignment */
635 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
636 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
638 { ICE_IPV4_OFOS, 14 },
643 { ICE_PROTOCOL_LAST, 0 },
646 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
647 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
648 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00,
652 0x45, 0x00, 0x00, 0x58, /* IP 14 */
653 0x00, 0x00, 0x00, 0x00,
654 0x00, 0x11, 0x00, 0x00,
655 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x00,
658 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
659 0x00, 0x44, 0x00, 0x00,
661 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
662 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x85,
665 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
666 0x00, 0x00, 0x00, 0x00,
668 0x45, 0x00, 0x00, 0x28, /* IP 62 */
669 0x00, 0x00, 0x00, 0x00,
670 0x00, 0x06, 0x00, 0x00,
671 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
675 0x00, 0x00, 0x00, 0x00,
676 0x00, 0x00, 0x00, 0x00,
677 0x50, 0x00, 0x00, 0x00,
678 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, /* 2 bytes for 4 byte alignment */
683 /* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
684 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
686 { ICE_IPV4_OFOS, 14 },
690 { ICE_UDP_ILOS, 82 },
691 { ICE_PROTOCOL_LAST, 0 },
694 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
695 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
696 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00,
700 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
701 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x11, 0x00, 0x00,
703 0x00, 0x00, 0x00, 0x00,
704 0x00, 0x00, 0x00, 0x00,
706 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
707 0x00, 0x38, 0x00, 0x00,
709 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
710 0x00, 0x00, 0x00, 0x00,
711 0x00, 0x00, 0x00, 0x85,
713 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
714 0x00, 0x00, 0x00, 0x00,
716 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
717 0x00, 0x00, 0x00, 0x00,
718 0x00, 0x11, 0x00, 0x00,
719 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
723 0x00, 0x08, 0x00, 0x00,
725 0x00, 0x00, /* 2 bytes for 4 byte alignment */
728 /* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
729 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
731 { ICE_IPV4_OFOS, 14 },
736 { ICE_PROTOCOL_LAST, 0 },
739 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
740 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
741 0x00, 0x00, 0x00, 0x00,
742 0x00, 0x00, 0x00, 0x00,
745 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
746 0x00, 0x00, 0x00, 0x00,
747 0x00, 0x11, 0x00, 0x00,
748 0x00, 0x00, 0x00, 0x00,
749 0x00, 0x00, 0x00, 0x00,
751 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
752 0x00, 0x58, 0x00, 0x00,
754 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
755 0x00, 0x00, 0x00, 0x00,
756 0x00, 0x00, 0x00, 0x85,
758 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
759 0x00, 0x00, 0x00, 0x00,
761 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
762 0x00, 0x14, 0x06, 0x00,
763 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00,
769 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
773 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00,
775 0x50, 0x00, 0x00, 0x00,
776 0x00, 0x00, 0x00, 0x00,
778 0x00, 0x00, /* 2 bytes for 4 byte alignment */
781 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
783 { ICE_IPV4_OFOS, 14 },
787 { ICE_UDP_ILOS, 102 },
788 { ICE_PROTOCOL_LAST, 0 },
791 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
792 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
793 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x00, 0x00, 0x00,
797 0x45, 0x00, 0x00, 0x60, /* IP 14 */
798 0x00, 0x00, 0x00, 0x00,
799 0x00, 0x11, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00,
801 0x00, 0x00, 0x00, 0x00,
803 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
804 0x00, 0x4c, 0x00, 0x00,
806 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
807 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x85,
810 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
811 0x00, 0x00, 0x00, 0x00,
813 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
814 0x00, 0x08, 0x11, 0x00,
815 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00,
817 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00,
820 0x00, 0x00, 0x00, 0x00,
821 0x00, 0x00, 0x00, 0x00,
822 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
825 0x00, 0x08, 0x00, 0x00,
827 0x00, 0x00, /* 2 bytes for 4 byte alignment */
830 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
832 { ICE_IPV6_OFOS, 14 },
837 { ICE_PROTOCOL_LAST, 0 },
840 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
841 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
842 0x00, 0x00, 0x00, 0x00,
843 0x00, 0x00, 0x00, 0x00,
846 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
847 0x00, 0x44, 0x11, 0x00,
848 0x00, 0x00, 0x00, 0x00,
849 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00,
851 0x00, 0x00, 0x00, 0x00,
852 0x00, 0x00, 0x00, 0x00,
853 0x00, 0x00, 0x00, 0x00,
854 0x00, 0x00, 0x00, 0x00,
855 0x00, 0x00, 0x00, 0x00,
857 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
858 0x00, 0x44, 0x00, 0x00,
860 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
861 0x00, 0x00, 0x00, 0x00,
862 0x00, 0x00, 0x00, 0x85,
864 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
865 0x00, 0x00, 0x00, 0x00,
867 0x45, 0x00, 0x00, 0x28, /* IP 82 */
868 0x00, 0x00, 0x00, 0x00,
869 0x00, 0x06, 0x00, 0x00,
870 0x00, 0x00, 0x00, 0x00,
871 0x00, 0x00, 0x00, 0x00,
873 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
874 0x00, 0x00, 0x00, 0x00,
875 0x00, 0x00, 0x00, 0x00,
876 0x50, 0x00, 0x00, 0x00,
877 0x00, 0x00, 0x00, 0x00,
879 0x00, 0x00, /* 2 bytes for 4 byte alignment */
882 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
884 { ICE_IPV6_OFOS, 14 },
888 { ICE_UDP_ILOS, 102 },
889 { ICE_PROTOCOL_LAST, 0 },
892 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
893 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
894 0x00, 0x00, 0x00, 0x00,
895 0x00, 0x00, 0x00, 0x00,
898 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
899 0x00, 0x38, 0x11, 0x00,
900 0x00, 0x00, 0x00, 0x00,
901 0x00, 0x00, 0x00, 0x00,
902 0x00, 0x00, 0x00, 0x00,
903 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00,
905 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00,
909 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
910 0x00, 0x38, 0x00, 0x00,
912 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
913 0x00, 0x00, 0x00, 0x00,
914 0x00, 0x00, 0x00, 0x85,
916 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
917 0x00, 0x00, 0x00, 0x00,
919 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
920 0x00, 0x00, 0x00, 0x00,
921 0x00, 0x11, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00,
925 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
926 0x00, 0x08, 0x00, 0x00,
928 0x00, 0x00, /* 2 bytes for 4 byte alignment */
931 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
933 { ICE_IPV6_OFOS, 14 },
938 { ICE_PROTOCOL_LAST, 0 },
941 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
942 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
943 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, 0x00, 0x00,
947 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
948 0x00, 0x58, 0x11, 0x00,
949 0x00, 0x00, 0x00, 0x00,
950 0x00, 0x00, 0x00, 0x00,
951 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00,
953 0x00, 0x00, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00,
956 0x00, 0x00, 0x00, 0x00,
958 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
959 0x00, 0x58, 0x00, 0x00,
961 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
962 0x00, 0x00, 0x00, 0x00,
963 0x00, 0x00, 0x00, 0x85,
965 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
966 0x00, 0x00, 0x00, 0x00,
968 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
969 0x00, 0x14, 0x06, 0x00,
970 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00,
972 0x00, 0x00, 0x00, 0x00,
973 0x00, 0x00, 0x00, 0x00,
974 0x00, 0x00, 0x00, 0x00,
975 0x00, 0x00, 0x00, 0x00,
976 0x00, 0x00, 0x00, 0x00,
977 0x00, 0x00, 0x00, 0x00,
979 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
980 0x00, 0x00, 0x00, 0x00,
981 0x00, 0x00, 0x00, 0x00,
982 0x50, 0x00, 0x00, 0x00,
983 0x00, 0x00, 0x00, 0x00,
985 0x00, 0x00, /* 2 bytes for 4 byte alignment */
988 ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
990 { ICE_IPV6_OFOS, 14 },
994 { ICE_UDP_ILOS, 122 },
995 { ICE_PROTOCOL_LAST, 0 },
998 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
999 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
1000 0x00, 0x00, 0x00, 0x00,
1001 0x00, 0x00, 0x00, 0x00,
1004 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
1005 0x00, 0x4c, 0x11, 0x00,
1006 0x00, 0x00, 0x00, 0x00,
1007 0x00, 0x00, 0x00, 0x00,
1008 0x00, 0x00, 0x00, 0x00,
1009 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00,
1011 0x00, 0x00, 0x00, 0x00,
1012 0x00, 0x00, 0x00, 0x00,
1013 0x00, 0x00, 0x00, 0x00,
1015 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
1016 0x00, 0x4c, 0x00, 0x00,
1018 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
1019 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x85,
1022 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
1023 0x00, 0x00, 0x00, 0x00,
1025 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
1026 0x00, 0x08, 0x11, 0x00,
1027 0x00, 0x00, 0x00, 0x00,
1028 0x00, 0x00, 0x00, 0x00,
1029 0x00, 0x00, 0x00, 0x00,
1030 0x00, 0x00, 0x00, 0x00,
1031 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x00,
1033 0x00, 0x00, 0x00, 0x00,
1034 0x00, 0x00, 0x00, 0x00,
1036 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
1037 0x00, 0x08, 0x00, 0x00,
1039 0x00, 0x00, /* 2 bytes for 4 byte alignment */
1042 ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
1043 { ICE_MAC_OFOS, 0 },
1044 { ICE_IPV4_OFOS, 14 },
1046 { ICE_GTP_NO_PAY, 42 },
1047 { ICE_PROTOCOL_LAST, 0 },
1050 ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
1051 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1052 0x00, 0x00, 0x00, 0x00,
1053 0x00, 0x00, 0x00, 0x00,
1056 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
1057 0x00, 0x00, 0x40, 0x00,
1058 0x40, 0x11, 0x00, 0x00,
1059 0x00, 0x00, 0x00, 0x00,
1060 0x00, 0x00, 0x00, 0x00,
1062 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
1063 0x00, 0x00, 0x00, 0x00,
1065 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
1066 0x00, 0x00, 0x00, 0x00,
1067 0x00, 0x00, 0x00, 0x85,
1069 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
1070 0x00, 0x00, 0x00, 0x00,
1072 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
1073 0x00, 0x00, 0x40, 0x00,
1074 0x40, 0x00, 0x00, 0x00,
1075 0x00, 0x00, 0x00, 0x00,
1076 0x00, 0x00, 0x00, 0x00,
1080 ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
1081 { ICE_MAC_OFOS, 0 },
1082 { ICE_IPV6_OFOS, 14 },
1084 { ICE_GTP_NO_PAY, 62 },
1085 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet bytes for outer IPv6 + UDP + GTP with no payload; offsets per
 * ice_dummy_ipv6_gtp_packet_offsets above (ICE_GTP_NO_PAY at 62).
 */
1088 ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
1089 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1090 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00,
1094 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
1095 0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
1096 0x00, 0x00, 0x00, 0x00,
1097 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00,
1099 0x00, 0x00, 0x00, 0x00,
1100 0x00, 0x00, 0x00, 0x00,
1101 0x00, 0x00, 0x00, 0x00,
1102 0x00, 0x00, 0x00, 0x00,
1103 0x00, 0x00, 0x00, 0x00,
1105 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
1106 0x00, 0x00, 0x00, 0x00,
1108 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
1109 0x00, 0x00, 0x00, 0x00,
1114 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
1115 { ICE_MAC_OFOS, 0 },
1116 { ICE_ETYPE_OL, 12 },
1118 { ICE_IPV4_OFOS, 22 },
1120 { ICE_PROTOCOL_LAST, 0 },
1123 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
1124 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1125 0x00, 0x00, 0x00, 0x00,
1126 0x00, 0x00, 0x00, 0x00,
1128 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1130 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1133 0x00, 0x21, /* PPP Link Layer 20 */
1135 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
1136 0x00, 0x01, 0x00, 0x00,
1137 0x00, 0x06, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00,
1139 0x00, 0x00, 0x00, 0x00,
1141 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
1142 0x00, 0x00, 0x00, 0x00,
1143 0x00, 0x00, 0x00, 0x00,
1144 0x50, 0x00, 0x00, 0x00,
1145 0x00, 0x00, 0x00, 0x00,
1147 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1150 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
1151 { ICE_MAC_OFOS, 0 },
1152 { ICE_ETYPE_OL, 12 },
1154 { ICE_IPV4_OFOS, 22 },
1155 { ICE_UDP_ILOS, 42 },
1156 { ICE_PROTOCOL_LAST, 0 },
1159 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
1160 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1161 0x00, 0x00, 0x00, 0x00,
1162 0x00, 0x00, 0x00, 0x00,
1164 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1166 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1169 0x00, 0x21, /* PPP Link Layer 20 */
1171 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
1172 0x00, 0x01, 0x00, 0x00,
1173 0x00, 0x11, 0x00, 0x00,
1174 0x00, 0x00, 0x00, 0x00,
1175 0x00, 0x00, 0x00, 0x00,
1177 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
1178 0x00, 0x08, 0x00, 0x00,
1180 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1183 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
1184 { ICE_MAC_OFOS, 0 },
1185 { ICE_ETYPE_OL, 12 },
1187 { ICE_IPV6_OFOS, 22 },
1189 { ICE_PROTOCOL_LAST, 0 },
1192 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
1193 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1194 0x00, 0x00, 0x00, 0x00,
1195 0x00, 0x00, 0x00, 0x00,
1197 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1199 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1202 0x00, 0x57, /* PPP Link Layer 20 */
1204 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1205 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
1206 0x00, 0x00, 0x00, 0x00,
1207 0x00, 0x00, 0x00, 0x00,
1208 0x00, 0x00, 0x00, 0x00,
1209 0x00, 0x00, 0x00, 0x00,
1210 0x00, 0x00, 0x00, 0x00,
1211 0x00, 0x00, 0x00, 0x00,
1212 0x00, 0x00, 0x00, 0x00,
1213 0x00, 0x00, 0x00, 0x00,
1215 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
1216 0x00, 0x00, 0x00, 0x00,
1217 0x00, 0x00, 0x00, 0x00,
1218 0x50, 0x00, 0x00, 0x00,
1219 0x00, 0x00, 0x00, 0x00,
1221 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1224 ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
1225 { ICE_MAC_OFOS, 0 },
1226 { ICE_ETYPE_OL, 12 },
1228 { ICE_IPV6_OFOS, 22 },
1229 { ICE_UDP_ILOS, 62 },
1230 { ICE_PROTOCOL_LAST, 0 },
/* Dummy packet bytes for MAC + PPPoE + IPv6 + UDP; offsets per
 * ice_dummy_pppoe_ipv6_udp_packet_offsets above.
 */
1233 ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
1234 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1235 0x00, 0x00, 0x00, 0x00,
1236 0x00, 0x00, 0x00, 0x00,
1238 0x88, 0x64, /* ICE_ETYPE_OL 12 */
1240 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
1243 0x00, 0x57, /* PPP Link Layer 20 */
1245 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
1246 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
1247 0x00, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x00, 0x00,
1249 0x00, 0x00, 0x00, 0x00,
1250 0x00, 0x00, 0x00, 0x00,
1251 0x00, 0x00, 0x00, 0x00,
1252 0x00, 0x00, 0x00, 0x00,
1253 0x00, 0x00, 0x00, 0x00,
1254 0x00, 0x00, 0x00, 0x00,
1256 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
1257 0x00, 0x08, 0x00, 0x00,
1259 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1262 ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
1263 { ICE_MAC_OFOS, 0 },
1264 { ICE_ETYPE_OL, 12 },
1265 { ICE_IPV4_OFOS, 14 },
1267 { ICE_PROTOCOL_LAST, 0 },
1270 ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
1271 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1272 0x00, 0x00, 0x00, 0x00,
1273 0x00, 0x00, 0x00, 0x00,
1275 0x08, 0x00, /* ICE_ETYPE_OL 12 */
1277 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
1278 0x00, 0x00, 0x40, 0x00,
1279 0x40, 0x73, 0x00, 0x00,
1280 0x00, 0x00, 0x00, 0x00,
1281 0x00, 0x00, 0x00, 0x00,
1283 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
1284 0x00, 0x00, 0x00, 0x00,
1285 0x00, 0x00, 0x00, 0x00,
1286 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
1289 ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
1290 { ICE_MAC_OFOS, 0 },
1291 { ICE_ETYPE_OL, 12 },
1292 { ICE_IPV6_OFOS, 14 },
1294 { ICE_PROTOCOL_LAST, 0 },
1297 ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
1298 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
1299 0x00, 0x00, 0x00, 0x00,
1300 0x00, 0x00, 0x00, 0x00,
1302 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
1304 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
1305 0x00, 0x0c, 0x73, 0x40,
1306 0x00, 0x00, 0x00, 0x00,
1307 0x00, 0x00, 0x00, 0x00,
1308 0x00, 0x00, 0x00, 0x00,
1309 0x00, 0x00, 0x00, 0x00,
1310 0x00, 0x00, 0x00, 0x00,
1311 0x00, 0x00, 0x00, 0x00,
1312 0x00, 0x00, 0x00, 0x00,
1313 0x00, 0x00, 0x00, 0x00,
1315 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
1316 0x00, 0x00, 0x00, 0x00,
1317 0x00, 0x00, 0x00, 0x00,
1318 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
/* Table of all dummy packet profiles, keyed by ICE_PKT_* match flags.
 * NOTE(review): entries appear ordered from most- to least-specific flag
 * combination (the bare tcp profile, match 0, is last) — presumably the
 * lookup takes the first entry whose flags are a subset of the request;
 * confirm against the profile-selection code, which is not in this excerpt.
 * Several continuation lines of flag expressions are elided here.
 */
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6 |
	ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6 |
	ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6 |
	ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
					    ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
	ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
	ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
	ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
	ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
	ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
	ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6 |
	ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
	ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
	ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
					  ICE_PKT_INNER_IPV6),
	ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
	ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
	ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
	ICE_PKT_PROFILE(tcp, 0),
/* Size helpers for the variable-length switch-rule AQ structures; all are
 * built on struct_size() over the structure's trailing flexible array
 * (hdr_data / act / vsi).
 */
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
/* Rule carrying the full dummy Ethernet header (DUMMY_ETH_HDR_LEN bytes) */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
/* Rule with no header data, e.g. for rule removal */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
	ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
/* Recipe-to-profile association bitmap: indexed by recipe ID, each set bit
 * marks a package profile the recipe is associated with.
 */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
		      ICE_MAX_NUM_PROFILES);

/* Profile-to-recipe association bitmap: the inverse mapping, indexed by
 * profile ID with one bit per recipe.
 */
static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
		      ICE_MAX_NUM_RECIPES);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 */
int ice_init_def_sw_recp(struct ice_hw *hw)
	struct ice_sw_recipe *recps;

	recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
			     sizeof(*recps), GFP_KERNEL);
	/* NOTE(review): the allocation-failure check is not visible in this
	 * excerpt — confirm recps is validated before use.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* each default recipe is its own root until chained */
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		mutex_init(&recps[i].filt_rule_lock);
	hw->switch_info->recp_list = recps;
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in buf.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *req_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	/* pass the continuation cookie from the previous call (0 = first) */
	cmd->element = cpu_to_le16(*req_desc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* FW writes the next-block cookie and element count back into desc */
	*req_desc = le16_to_cpu(cmd->element);
	*num_elems = le16_to_cpu(cmd->num_elems);
/**
 * ice_aq_add_vsi - add a VSI context to the hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);

	/* a caller-supplied VSI number is only passed through when not
	 * allocating from the FW pool
	 */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vf_id = vsi_ctx->vf_num;

	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);

	/* command buffer is read by FW */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* NOTE(review): a success check before consuming the response is not
	 * visible in this excerpt — confirm status is tested first.
	 */
	vsi_ctx->vsi_num = le16_to_cpu(res->vsi_num) & ICE_AQ_VSI_NUM_M;
	vsi_ctx->vsis_allocd = le16_to_cpu(res->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(res->vsi_free);
/**
 * ice_aq_free_vsi - free a VSI context in the hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* NOTE(review): keep_vsi_alloc is not referenced in the visible code;
	 * expected an "if (keep_vsi_alloc)" guard before setting the flag —
	 * the guard line appears to be elided from this excerpt.
	 */
	cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
/**
 * ice_aq_update_vsi - update a VSI context in the hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);

	cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);

	/* command buffer (vsi_ctx->info) is read by FW */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used);
	vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
/**
 * ice_is_vsi_valid - check whether the VSI is valid or not
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * check whether the VSI is valid or not
 */
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
	/* handle is in range AND a context has been saved for it */
	return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
/**
 * ice_get_hw_vsi_num - return the HW VSI number
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the HW VSI number
 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
 */
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
	/* no bounds/NULL check here — validity is the caller's contract */
	return hw->vsi_ctx[vsi_handle]->vsi_num;
/**
 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * return the VSI context entry for a given VSI handle, or NULL when the
 * handle is out of range (entry itself may also be NULL if never saved)
 */
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
	return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle; ownership of @vsi
 * transfers to the hw->vsi_ctx table (freed later by ice_clear_vsi_ctx)
 */
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
	hw->vsi_ctx[vsi_handle] = vsi;
1633 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
1634 * @hw: pointer to the HW struct
1635 * @vsi_handle: VSI handle
1637 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
1639 struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
1644 ice_for_each_traffic_class(i) {
1645 devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
1646 vsi->lan_q_ctx[i] = NULL;
1647 devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
1648 vsi->rdma_q_ctx[i] = NULL;
/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry: release the queue contexts first, then free
 * the context struct itself and drop it from the handle table
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	ice_clear_vsi_q_ctx(hw, vsi_handle);
	devm_kfree(ice_hw_to_dev(hw), vsi);
	hw->vsi_ctx[vsi_handle] = NULL;
/**
 * ice_clear_all_vsi_ctx - clear all the VSI context entries
 * @hw: pointer to the HW struct
 *
 * Iterates over every possible handle; clearing an empty slot is harmless.
 */
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
	for (i = 0; i < ICE_MAX_VSI; i++)
		ice_clear_vsi_ctx(hw, i);
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
	struct ice_vsi_ctx *tmp_vsi_ctx;

	if (vsi_handle >= ICE_MAX_VSI)
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
		/* Create a new VSI context */
		tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
					   sizeof(*tmp_vsi_ctx), GFP_KERNEL);
		/* on allocation failure the FW-side VSI is rolled back */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
		/* cache a copy; ownership of the copy goes to the handle
		 * table via ice_save_vsi_ctx
		 */
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
		/* update with new HW VSI num */
		tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
/**
 * ice_free_vsi - free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
	if (!ice_is_vsi_valid(hw, vsi_handle))
	/* resolve handle to the HW VSI number before the AQ call */
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
	ice_clear_vsi_ctx(hw, vsi_handle);
/**
 * ice_update_vsi - update a VSI context
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware
 */
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	if (!ice_is_vsi_valid(hw, vsi_handle))
	/* translate SW handle to HW VSI number for the AQ command */
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	return ice_aq_update_vsi(hw, vsi_ctx, cd);
/**
 * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
 * @hw: pointer to HW struct
 * @vsi_handle: VSI SW index
 * @enable: boolean for enable/disable
 *
 * Toggles the PE (protocol engine) filter enable bit in the VSI's queue
 * option flags via an update-VSI command, then mirrors the change back into
 * the cached context on success.
 */
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
	struct ice_vsi_ctx *ctx, *cached_ctx;

	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	/* scratch context: only the queue-option section is sent as valid */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;

	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);

		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;

	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
	/* keep the cached context in sync with what HW now has */
		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
/**
 * ice_aq_alloc_free_vsi_list - allocate or free a VSI list resource
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned (alloc) or used for lookup (free)
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
	sw_buf->num_elems = cpu_to_le16(1);

	/* replication lists serve all lookup types except VLAN, which uses
	 * the prune list resource; any other type is rejected
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_DFLT) {
		sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
			cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
		goto ice_aq_alloc_free_vsi_list_exit;

	/* when freeing, tell FW which list ID to release */
	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
		goto ice_aq_alloc_free_vsi_list_exit;

	/* when allocating, FW returns the new list ID in the response */
	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);

ice_aq_alloc_free_vsi_list_exit:
	devm_kfree(ice_hw_to_dev(hw), sw_buf);
/**
 * ice_aq_sw_rules - add/update/remove switch rules
 * @hw: pointer to the HW struct
 * @rule_list: pointer to switch rule population list
 * @rule_list_sz: total size of the rule list in bytes
 * @num_rules: number of switch rules in the rule_list
 * @opc: switch rules population command type - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
 */
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
		u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
	struct ice_aq_desc desc;

	/* reject anything other than the three supported rule opcodes */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    opc != ice_aqc_opc_update_sw_rules &&
	    opc != ice_aqc_opc_remove_sw_rules)

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	/* rule buffer is read by FW */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.params.sw_rules.num_rules_fltr_entry_index =
		cpu_to_le16(num_rules);
	status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
	/* ENOENT on update/remove means the rule did not exist */
	if (opc != ice_aqc_opc_add_sw_rules &&
	    hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
/**
 * ice_aq_add_recipe - add switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: number of switch recipes in the list
 * @cd: pointer to command details structure or NULL
 *
 * Add recipe (0x0290)
 */
ice_aq_add_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 num_recipes, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);

	cmd->num_sub_recipes = cpu_to_le16(num_recipes);
	/* recipe buffer is read by FW */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	buf_size = num_recipes * sizeof(*s_recipe_list);

	return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/**
 * ice_aq_get_recipe - get switch recipe
 * @hw: pointer to the HW struct
 * @s_recipe_list: pointer to switch rule population list
 * @num_recipes: pointer to the number of recipes (input and output)
 * @recipe_root: root recipe number of recipe(s) to retrieve
 * @cd: pointer to command details structure or NULL
 *
 * Get recipe (0x0292)
 *
 * On input, *num_recipes should equal the number of entries in s_recipe_list.
 * On output, *num_recipes will equal the number of entries returned in
 * s_recipe_list.
 *
 * The caller must supply enough space in s_recipe_list to hold all possible
 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
 */
ice_aq_get_recipe(struct ice_hw *hw,
		  struct ice_aqc_recipe_data_elem *s_recipe_list,
		  u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
	struct ice_aqc_add_get_recipe *cmd;
	struct ice_aq_desc desc;

	/* enforce the full-size-buffer contract documented above */
	if (*num_recipes != ICE_MAX_NUM_RECIPES)

	cmd = &desc.params.add_get_recipe;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);

	cmd->return_index = cpu_to_le16(recipe_root);
	cmd->num_sub_recipes = 0;

	buf_size = *num_recipes * sizeof(*s_recipe_list);

	status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
	/* FW reports how many sub-recipes were actually returned */
	*num_recipes = le16_to_cpu(cmd->num_sub_recipes);
/**
 * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
 * @hw: pointer to the HW struct
 * @params: parameters used to update the default recipe
 *
 * This function only supports updating default recipes and it only supports
 * updating a single recipe based on the lkup_idx at a time.
 *
 * This is done as a read-modify-write operation. First, get the current recipe
 * contents based on the recipe's ID. Then modify the field vector index and
 * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
 * the pre-existing recipe with the modifications.
 */
ice_update_recipe_lkup_idx(struct ice_hw *hw,
			   struct ice_update_recipe_lkup_idx_params *params)
	struct ice_aqc_recipe_data_elem *rcp_list;
	u16 num_recps = ICE_MAX_NUM_RECIPES;

	rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);

	/* read current recipe list from firmware */
	rcp_list->recipe_indx = params->rid;
	status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
		ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
			  params->rid, status);

	/* only modify existing recipe's lkup_idx and mask if valid, while
	 * leaving all other fields the same, then update the recipe firmware
	 */
	rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
	if (params->mask_valid)
		rcp_list->content.mask[params->lkup_idx] =
			cpu_to_le16(params->mask);

	if (params->ignore_valid)
		rcp_list->content.lkup_indx[params->lkup_idx] |=
			ICE_AQ_RECIPE_LKUP_IGNORE;

	/* writing back via add-recipe AQ overwrites the existing recipe */
	status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
		ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
			  params->rid, params->lkup_idx, params->fv_idx,
			  params->mask, params->mask_valid ? "true" : "false",
/**
 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID to associate the recipe with
 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
 * @cd: pointer to command details structure or NULL
 * Recipe to profile association (0x0291)
 */
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);
	/* Set the recipe ID bit in the bitmask to let the device know which
	 * profile we are associating the recipe to
	 */
	memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/**
 * ice_aq_get_recipe_to_profile - get recipe-to-profile association
 * @hw: pointer to the HW struct
 * @profile_id: package profile ID the association is queried for
 * @r_bitmap: recipe bitmap filled in from the FW response
 * @cd: pointer to command details structure or NULL
 * Associate profile ID with given recipe (0x0293)
 */
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
			     struct ice_sq_cd *cd)
	struct ice_aqc_recipe_to_profile *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.recipe_to_profile;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
	cmd->profile_id = cpu_to_le16(profile_id);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* FW returns the association bitmap inside the descriptor */
		memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
/**
 * ice_alloc_recipe - add recipe resource
 * @hw: pointer to the hardware structure
 * @rid: recipe ID returned as response to AQ call
 *
 * Allocates one shared recipe resource from FW and returns its ID in *rid.
 */
static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
	struct ice_aqc_alloc_free_res_elem *sw_buf;

	buf_len = struct_size(sw_buf, elem, 1);
	sw_buf = kzalloc(buf_len, GFP_KERNEL);

	sw_buf->num_elems = cpu_to_le16(1);
	/* request a SHARED recipe resource */
	sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
					ICE_AQC_RES_TYPE_S) |
				       ICE_AQC_RES_TYPE_FLAG_SHARED);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to. The inverse (profile_to_recipe) is filled in the
 * same pass.
 */
static void ice_get_recp_to_prof_map(struct ice_hw *hw)
	DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);

	for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {

		bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
		bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
		/* skip profiles the FW has no association data for */
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		bitmap_copy(profile_to_recipe[i], r_bitmap,
			    ICE_MAX_NUM_RECIPES);
		/* mirror each recipe bit into the inverse map */
		for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
			set_bit(i, recipe_to_profile[j]);
/**
 * ice_collect_result_idx - copy result index values
 * @buf: buffer that contains the result index
 * @recp: the recipe struct to copy data into
 *
 * If the recipe element has a valid result index (RESULT_EN set), record
 * the index (with the enable bit masked off) in the recipe's bitmap.
 */
ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
		       struct ice_sw_recipe *recp)
	if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
		set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 * @refresh_required: true if we should get recipe to profile mapping from FW
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 */
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
		    bool *refresh_required)
	DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;

	bitmap_zero(result_bm, ICE_MAX_FV_WORDS);

	/* we need a buffer big enough to accommodate all the recipes */
	tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);

	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */

	/* Get recipe to profile map so that we can get the fv from lkups that
	 * we read for a recipe from FW. Since we want to minimize the number of
	 * times we make this FW call, just make one call and cache the copy
	 * until a new recipe is added. This operation is only required the
	 * first time to get the changes from FW. Then to search existing
	 * entries we don't need to update the cache again until another recipe
	 * gets added.
	 */
	if (*refresh_required) {
		ice_get_recp_to_prof_map(hw);
		*refresh_required = false;

	/* Start populating all the entries for recps[rid] based on lkups from
	 * firmware. Note that we are only creating the root recipe in our
	 * database.
	 */
	lkup_exts = &recps[rid].lkup_exts;

	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 i, prof, idx, prot = 0;

		rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),

		idx = root_bufs.recipe_indx;
		is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;

		/* Mark all result indices in this chain */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,

		/* get the first profile that is associated with rid */
		prof = find_first_bit(recipe_to_profile[idx],
				      ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lkup_indx[0] is skipped: word 0 is not a lookup */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];

			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				le16_to_cpu(root_bufs.content.mask[i + 1]);

			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
			    rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)

			ice_find_prot_off(hw, ICE_BLK_SW, prof,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
			lkup_exts->field_mask[fv_word_idx] =
				rg_entry->fv_mask[i];

		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		list_add(&rg_entry->l_entry, &recps[rid].rg_list);

		/* Propagate some data to the recipe database */
		recps[idx].is_root = !!is_root;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
		bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
			recps[idx].chain_idx = root_bufs.content.result_indx &
					       ~ICE_AQ_RECIPE_RESULT_EN;
			set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
			recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;

		/* Only do the following for root recipes entries */
		memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
		       sizeof(recps[idx].r_bitmap));
		recps[idx].root_rid = root_bufs.content.rid &
				      ~ICE_AQ_RECIPE_ID_IS_ROOT;
		recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;

	/* Complete initialization of the root recipe entry */
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].big_recp = (num_recps > 1);
	recps[rid].n_grp_count = (u8)num_recps;
	recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
					   recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
	if (!recps[rid].root_buf) {

	/* Copy result indexes */
	bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
	recps[rid].recp_created = true;
/* ice_init_port_info - Initialize port_info with switch configuration data
 * @pi: pointer to port_info
 * @vsi_port_num: VSI number or port number
 * @type: Type of switch element (port or VSI)
 * @swid: switch ID of the switch the element is attached to
 * @pf_vf_num: PF or VF number
 * @is_vf: true if the element is a VF, false otherwise
 */
ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
		   u16 swid, u16 pf_vf_num, bool is_vf)
	case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
		pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
		pi->pf_vf_num = pf_vf_num;
	/* any element type other than a physical port is unexpected here */
		ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 */
int ice_get_initial_sw_cfg(struct ice_hw *hw)
	struct ice_aqc_get_sw_cfg_resp_elem *rbuf;

	rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
		struct ice_aqc_get_sw_cfg_resp_elem *ele;

		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
			u16 pf_vf_num, swid, vsi_port_num;

			/* low bits carry the VSI/port number, high bits the
			 * element type (see res_type extraction below)
			 */
			vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = le16_to_cpu(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = le16_to_cpu(ele->swid);

			if (le16_to_cpu(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)

			res_type = (u8)(le16_to_cpu(ele->vsi_port_num) >>
					ICE_AQC_GET_SW_CONF_RESP_TYPE_S);

			if (res_type == ICE_AQC_GET_SW_CONF_RESP_VSI) {
				/* FW VSI is not needed. Just continue. */

			ice_init_port_info(hw->port_info, vsi_port_num,
					   res_type, swid, pf_vf_num, is_vf);
	/* keep fetching until FW reports no continuation or an error */
	} while (req_desc && !status);
/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
	/* only TX-direction forwarding filters get special treatment */
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 * promiscuous, ethertype-MAC, promiscuous-VLAN
		 * and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */

			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
2463 * ice_fill_sw_rule - Helper function to fill switch rule structure
2464 * @hw: pointer to the hardware structure
2465 * @f_info: entry containing packet forwarding information
2466 * @s_rule: switch rule structure to be filled in based on mac_entry
2467 * @opc: switch rules population command type - pass in the command opcode
2470 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
2471 struct ice_sw_rule_lkup_rx_tx *s_rule,
2472 enum ice_adminq_opc opc)
/* Sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN ID to program". */
2474 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
2475 u16 vlan_tpid = ETH_P_8021Q;
/* A remove command only needs the rule index; no header payload. */
2483 if (opc == ice_aqc_opc_remove_sw_rules) {
2485 s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2486 s_rule->hdr_len = 0;
2490 eth_hdr_sz = sizeof(dummy_eth_header);
2491 eth_hdr = s_rule->hdr_data;
2493 /* initialize the ether header with a dummy header */
2494 memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
2495 ice_fill_sw_info(hw, f_info);
/* Build the 32-bit action word from the requested forwarding action. */
2497 switch (f_info->fltr_act) {
2498 case ICE_FWD_TO_VSI:
2499 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
2500 ICE_SINGLE_ACT_VSI_ID_M;
2501 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2502 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2503 ICE_SINGLE_ACT_VALID_BIT;
2505 case ICE_FWD_TO_VSI_LIST:
2506 act |= ICE_SINGLE_ACT_VSI_LIST;
2507 act |= (f_info->fwd_id.vsi_list_id <<
2508 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
2509 ICE_SINGLE_ACT_VSI_LIST_ID_M;
2510 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
2511 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
2512 ICE_SINGLE_ACT_VALID_BIT;
2515 act |= ICE_SINGLE_ACT_TO_Q;
2516 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2517 ICE_SINGLE_ACT_Q_INDEX_M;
2519 case ICE_DROP_PACKET:
2520 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
2521 ICE_SINGLE_ACT_VALID_BIT;
2523 case ICE_FWD_TO_QGRP:
/* Queue-group region size is encoded as log2 of the group size. */
2524 q_rgn = f_info->qgrp_size > 0 ?
2525 (u8)ilog2(f_info->qgrp_size) : 0;
2526 act |= ICE_SINGLE_ACT_TO_Q;
2527 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
2528 ICE_SINGLE_ACT_Q_INDEX_M;
2529 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
2530 ICE_SINGLE_ACT_Q_REGION_M;
2537 act |= ICE_SINGLE_ACT_LB_ENABLE;
2539 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Collect per-lookup data (dest MAC / VLAN ID / ethertype) to patch
 * into the dummy Ethernet header below.
 */
2541 switch (f_info->lkup_type) {
2542 case ICE_SW_LKUP_MAC:
2543 daddr = f_info->l_data.mac.mac_addr;
2545 case ICE_SW_LKUP_VLAN:
2546 vlan_id = f_info->l_data.vlan.vlan_id;
2547 if (f_info->l_data.vlan.tpid_valid)
2548 vlan_tpid = f_info->l_data.vlan.tpid;
/* VLAN forwarding rules double as prune rules in both directions. */
2549 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
2550 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
2551 act |= ICE_SINGLE_ACT_PRUNE;
2552 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
2555 case ICE_SW_LKUP_ETHERTYPE_MAC:
2556 daddr = f_info->l_data.ethertype_mac.mac_addr;
2558 case ICE_SW_LKUP_ETHERTYPE:
2559 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2560 *off = cpu_to_be16(f_info->l_data.ethertype_mac.ethertype);
2562 case ICE_SW_LKUP_MAC_VLAN:
2563 daddr = f_info->l_data.mac_vlan.mac_addr;
2564 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2566 case ICE_SW_LKUP_PROMISC_VLAN:
2567 vlan_id = f_info->l_data.mac_vlan.vlan_id;
2569 case ICE_SW_LKUP_PROMISC:
2570 daddr = f_info->l_data.mac_vlan.mac_addr;
2576 s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
2577 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
2578 cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
2580 /* Recipe set depending on lookup type */
2581 s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
2582 s_rule->src = cpu_to_le16(f_info->src);
2583 s_rule->act = cpu_to_le32(act);
2586 ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
/* Program VLAN TCI and TPID only when a valid 12-bit ID was set above. */
2588 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
2589 off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
2590 *off = cpu_to_be16(vlan_id);
2591 off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
2592 *off = cpu_to_be16(vlan_tpid);
2595 /* Create the switch rule with the final dummy Ethernet header */
2596 if (opc != ice_aqc_opc_update_sw_rules)
2597 s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
2601 * ice_add_marker_act
2602 * @hw: pointer to the hardware structure
2603 * @m_ent: the management entry for which sw marker needs to be added
2604 * @sw_marker: sw marker to tag the Rx descriptor with
2605 * @l_id: large action resource ID
2607 * Create a large action to hold software marker and update the switch rule
2608 * entry pointed by m_ent with newly created large action
2611 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
2612 u16 sw_marker, u16 l_id)
2614 struct ice_sw_rule_lkup_rx_tx *rx_tx;
2615 struct ice_sw_rule_lg_act *lg_act;
2616 /* For software marker we need 3 large actions
2617 * 1. FWD action: FWD TO VSI or VSI LIST
2618 * 2. GENERIC VALUE action to hold the profile ID
2619 * 3. GENERIC VALUE action to hold the software marker ID
2621 const u16 num_lg_acts = 3;
/* SW markers are only supported on MAC lookup rules. */
2628 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
2631 /* Create two back-to-back switch rules and submit them to the HW using
2632 * one memory buffer:
2636 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
2637 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
2638 lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
/* The lookup rule lives immediately after the large action in the
 * same buffer so both can be submitted with a single AQ call.
 */
2642 rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
2644 /* Fill in the first switch rule i.e. large action */
2645 lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
2646 lg_act->index = cpu_to_le16(l_id);
2647 lg_act->size = cpu_to_le16(num_lg_acts);
2649 /* First action VSI forwarding or VSI list forwarding depending on how
2652 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
2653 m_ent->fltr_info.fwd_id.hw_vsi_id;
2655 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
2656 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
2657 if (m_ent->vsi_count > 1)
2658 act |= ICE_LG_ACT_VSI_LIST;
2659 lg_act->act[0] = cpu_to_le32(act);
2661 /* Second action descriptor type */
2662 act = ICE_LG_ACT_GENERIC;
2664 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
2665 lg_act->act[1] = cpu_to_le32(act);
/* Marker value is written at the Rx-descriptor profile index offset. */
2667 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
2668 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
2670 /* Third action Marker value */
2671 act |= ICE_LG_ACT_GENERIC;
2672 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
2673 ICE_LG_ACT_GENERIC_VALUE_M;
2675 lg_act->act[2] = cpu_to_le32(act);
2677 /* call the fill switch rule to fill the lookup Tx Rx structure */
2678 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
2679 ice_aqc_opc_update_sw_rules);
2681 /* Update the action to point to the large action ID */
2682 rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
2683 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
2684 ICE_SINGLE_ACT_PTR_VAL_M));
2686 /* Use the filter rule ID of the previously created rule with single
2687 * act. Once the update happens, hardware will treat this as large
2690 rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
2692 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
2693 ice_aqc_opc_update_sw_rules, NULL);
/* Book-keep the large action so it can be found/removed later. */
2695 m_ent->lg_act_idx = l_id;
2696 m_ent->sw_marker_id = sw_marker;
2699 devm_kfree(ice_hw_to_dev(hw), lg_act);
2704 * ice_create_vsi_list_map
2705 * @hw: pointer to the hardware structure
2706 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
2707 * @num_vsi: number of VSI handles in the array
2708 * @vsi_list_id: VSI list ID generated as part of allocate resource
2710 * Helper function to create a new entry of VSI list ID to VSI mapping
2711 * using the given VSI list ID
2713 static struct ice_vsi_list_map_info *
2714 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2717 struct ice_switch_info *sw = hw->switch_info;
2718 struct ice_vsi_list_map_info *v_map;
2721 v_map = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*v_map), GFP_KERNEL);
2725 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI in the map's bitmap. */
2727 for (i = 0; i < num_vsi; i++)
2728 set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the map on the switch-wide list so it can be looked up later. */
2730 list_add(&v_map->list_entry, &sw->vsi_list_map_head);
2735 * ice_update_vsi_list_rule
2736 * @hw: pointer to the hardware structure
2737 * @vsi_handle_arr: array of VSI handles to form a VSI list
2738 * @num_vsi: number of VSI handles in the array
2739 * @vsi_list_id: VSI list ID generated as part of allocate resource
2740 * @remove: Boolean value to indicate if this is a remove action
2741 * @opc: switch rules population command type - pass in the command opcode
2742 * @lkup_type: lookup type of the filter
2744 * Call AQ command to add a new switch rule or update existing switch rule
2745 * using the given VSI list ID
2748 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2749 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
2750 enum ice_sw_lkup_type lkup_type)
2752 struct ice_sw_rule_vsi_list *s_rule;
/* Most lookup types use a plain VSI list; VLAN lookups use a prune
 * list instead (packets are pruned rather than forwarded).
 */
2761 if (lkup_type == ICE_SW_LKUP_MAC ||
2762 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
2763 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
2764 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
2765 lkup_type == ICE_SW_LKUP_PROMISC ||
2766 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
2767 lkup_type == ICE_SW_LKUP_DFLT)
2768 rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
2769 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
2770 else if (lkup_type == ICE_SW_LKUP_VLAN)
2771 rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
2772 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
2776 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
2777 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
2780 for (i = 0; i < num_vsi; i++) {
2781 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2785 /* AQ call requires hw_vsi_id(s) */
2787 cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2790 s_rule->hdr.type = cpu_to_le16(rule_type);
2791 s_rule->number_vsi = cpu_to_le16(num_vsi);
2792 s_rule->index = cpu_to_le16(vsi_list_id);
2794 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2797 devm_kfree(ice_hw_to_dev(hw), s_rule);
2802 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2803 * @hw: pointer to the HW struct
2804 * @vsi_handle_arr: array of VSI handles to form a VSI list
2805 * @num_vsi: number of VSI handles in the array
2806 * @vsi_list_id: stores the ID of the VSI list to be created
2807 * @lkup_type: switch rule filter's lookup type
2810 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2811 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
/* First allocate the VSI list resource from firmware... */
2815 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2816 ice_aqc_opc_alloc_res);
2820 /* Update the newly created VSI list to include the specified VSIs */
2821 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2822 *vsi_list_id, false,
2823 ice_aqc_opc_add_sw_rules, lkup_type);
2827 * ice_create_pkt_fwd_rule
2828 * @hw: pointer to the hardware structure
2829 * @f_entry: entry containing packet forwarding information
2831 * Create switch rule with given filter information and add an entry
2832 * to the corresponding filter management list to track this switch rule
2836 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2837 struct ice_fltr_list_entry *f_entry)
2839 struct ice_fltr_mgmt_list_entry *fm_entry;
2840 struct ice_sw_rule_lkup_rx_tx *s_rule;
2841 enum ice_sw_lkup_type l_type;
2842 struct ice_sw_recipe *recp;
2845 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2846 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2850 fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
2854 goto ice_create_pkt_fwd_rule_exit;
2857 fm_entry->fltr_info = f_entry->fltr_info;
2859 /* Initialize all the fields for the management entry */
2860 fm_entry->vsi_count = 1;
2861 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2862 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2863 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2865 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2866 ice_aqc_opc_add_sw_rules);
2868 status = ice_aq_sw_rules(hw, s_rule,
2869 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2870 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure the management entry is freed; the goto path below
 * also frees the rule buffer.
 */
2872 devm_kfree(ice_hw_to_dev(hw), fm_entry);
2873 goto ice_create_pkt_fwd_rule_exit;
/* Firmware returns the assigned rule ID in s_rule->index. */
2876 f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2877 fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
2879 /* The book keeping entries will get removed when base driver
2880 * calls remove filter AQ command
2882 l_type = fm_entry->fltr_info.lkup_type;
2883 recp = &hw->switch_info->recp_list[l_type];
2884 list_add(&fm_entry->list_entry, &recp->filt_rules);
2886 ice_create_pkt_fwd_rule_exit:
2887 devm_kfree(ice_hw_to_dev(hw), s_rule);
2892 * ice_update_pkt_fwd_rule
2893 * @hw: pointer to the hardware structure
2894 * @f_info: filter information for switch rule
2896 * Call AQ command to update a previously created switch rule with a
2900 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2902 struct ice_sw_rule_lkup_rx_tx *s_rule;
2905 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
2906 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
2911 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Updates target an existing rule, so reuse its firmware rule ID. */
2913 s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
2915 /* Update switch rule with new rule set to forward VSI list */
2916 status = ice_aq_sw_rules(hw, s_rule,
2917 ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
2918 ice_aqc_opc_update_sw_rules, NULL);
2920 devm_kfree(ice_hw_to_dev(hw), s_rule);
2925 * ice_update_sw_rule_bridge_mode
2926 * @hw: pointer to the HW struct
2928 * Updates unicast switch filter rules based on VEB/VEPA mode
2930 int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2932 struct ice_switch_info *sw = hw->switch_info;
2933 struct ice_fltr_mgmt_list_entry *fm_entry;
2934 struct list_head *rule_head;
2935 struct mutex *rule_lock; /* Lock to protect filter rule list */
2938 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2939 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Walk all MAC rules under the recipe lock and re-push each matching
 * unicast Tx forwarding rule so its lan_en reflects the new mode.
 */
2941 mutex_lock(rule_lock);
2942 list_for_each_entry(fm_entry, rule_head, list_entry) {
2943 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2944 u8 *addr = fi->l_data.mac.mac_addr;
2946 /* Update unicast Tx rules to reflect the selected
2949 if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) &&
2950 (fi->fltr_act == ICE_FWD_TO_VSI ||
2951 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2952 fi->fltr_act == ICE_FWD_TO_Q ||
2953 fi->fltr_act == ICE_FWD_TO_QGRP)) {
2954 status = ice_update_pkt_fwd_rule(hw, fi);
2960 mutex_unlock(rule_lock);
2966 * ice_add_update_vsi_list
2967 * @hw: pointer to the hardware structure
2968 * @m_entry: pointer to current filter management list entry
2969 * @cur_fltr: filter information from the book keeping entry
2970 * @new_fltr: filter information with the new VSI to be added
2972 * Call AQ command to add or update previously created VSI list with new VSI.
2974 * Helper function to do book keeping associated with adding filter information
2975 * The algorithm to do the book keeping is described below :
2976 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2977 * if only one VSI has been added till now
2978 * Allocate a new VSI list and add two VSIs
2979 * to this list using switch rule command
2980 * Update the previously created switch rule with the
2981 * newly created VSI list ID
2982 * if a VSI list was previously created
2983 * Add the new VSI to the previously created VSI list set
2984 * using the update switch rule command
2987 ice_add_update_vsi_list(struct ice_hw *hw,
2988 struct ice_fltr_mgmt_list_entry *m_entry,
2989 struct ice_fltr_info *cur_fltr,
2990 struct ice_fltr_info *new_fltr)
2992 u16 vsi_list_id = 0;
/* Queue-based actions cannot be merged into a VSI list; reject those
 * combinations up front (error returns are on elided lines).
 */
2995 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2996 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2999 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
3000 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
3001 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
3002 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
3005 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
3006 /* Only one entry existed in the mapping and it was not already
3007 * a part of a VSI list. So, create a VSI list with the old and
3010 struct ice_fltr_info tmp_fltr;
3011 u16 vsi_handle_arr[2];
3013 /* A rule already exists with the new VSI being added */
3014 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
3017 vsi_handle_arr[0] = cur_fltr->vsi_handle;
3018 vsi_handle_arr[1] = new_fltr->vsi_handle;
3019 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3021 new_fltr->lkup_type);
3025 tmp_fltr = *new_fltr;
3026 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
3027 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3028 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3029 /* Update the previous switch rule of "MAC forward to VSI" to
3030 * "MAC fwd to VSI list"
3032 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3036 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
3037 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
3038 m_entry->vsi_list_info =
3039 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3042 if (!m_entry->vsi_list_info)
3045 /* If this entry was large action then the large action needs
3046 * to be updated to point to FWD to VSI list
3048 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
3050 ice_add_marker_act(hw, m_entry,
3051 m_entry->sw_marker_id,
3052 m_entry->lg_act_idx);
/* else: a VSI list already exists — just extend it with new VSI. */
3054 u16 vsi_handle = new_fltr->vsi_handle;
3055 enum ice_adminq_opc opcode;
3057 if (!m_entry->vsi_list_info)
3060 /* A rule already exists with the new VSI being added */
3061 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
3064 /* Update the previously created VSI list set with
3065 * the new VSI ID passed in
3067 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
3068 opcode = ice_aqc_opc_update_sw_rules;
3070 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
3071 vsi_list_id, false, opcode,
3072 new_fltr->lkup_type);
3073 /* update VSI list mapping info with new VSI ID */
3075 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
/* Count the new subscriber on success (guard is on an elided line). */
3078 m_entry->vsi_count++;
3083 * ice_find_rule_entry - Search a rule entry
3084 * @hw: pointer to the hardware structure
3085 * @recp_id: lookup type for which the specified rule needs to be searched
3086 * @f_info: rule information
3088 * Helper function to search for a given rule entry
3089 * Returns pointer to entry storing the rule if found
3091 static struct ice_fltr_mgmt_list_entry *
3092 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
3094 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
3095 struct ice_switch_info *sw = hw->switch_info;
3096 struct list_head *list_head;
3098 list_head = &sw->recp_list[recp_id].filt_rules;
/* Match on the full lookup data plus direction flag; caller must hold
 * the recipe's filt_rule_lock (this helper takes no lock itself).
 */
3099 list_for_each_entry(list_itr, list_head, list_entry) {
3100 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3101 sizeof(f_info->l_data)) &&
3102 f_info->flag == list_itr->fltr_info.flag) {
3111 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
3112 * @hw: pointer to the hardware structure
3113 * @recp_id: lookup type for which VSI lists needs to be searched
3114 * @vsi_handle: VSI handle to be found in VSI list
3115 * @vsi_list_id: VSI list ID found containing vsi_handle
3117 * Helper function to search a VSI list with single entry containing given VSI
3118 * handle element. This can be extended further to search VSI list with more
3119 * than 1 vsi_count. Returns pointer to VSI list entry if found.
3121 static struct ice_vsi_list_map_info *
3122 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
3125 struct ice_vsi_list_map_info *map_info = NULL;
3126 struct ice_switch_info *sw = hw->switch_info;
3127 struct ice_fltr_mgmt_list_entry *list_itr;
3128 struct list_head *list_head;
3130 list_head = &sw->recp_list[recp_id].filt_rules;
/* Only single-VSI lists are candidates; the list must both exist and
 * contain the requested VSI handle.
 */
3131 list_for_each_entry(list_itr, list_head, list_entry) {
3132 if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
3133 map_info = list_itr->vsi_list_info;
3134 if (test_bit(vsi_handle, map_info->vsi_map)) {
3135 *vsi_list_id = map_info->vsi_list_id;
3144 * ice_add_rule_internal - add rule for a given lookup type
3145 * @hw: pointer to the hardware structure
3146 * @recp_id: lookup type (recipe ID) for which rule has to be added
3147 * @f_entry: structure containing MAC forwarding information
3149 * Adds or updates the rule lists for a given recipe
3152 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
3153 struct ice_fltr_list_entry *f_entry)
3155 struct ice_switch_info *sw = hw->switch_info;
3156 struct ice_fltr_info *new_fltr, *cur_fltr;
3157 struct ice_fltr_mgmt_list_entry *m_entry;
3158 struct mutex *rule_lock; /* Lock to protect filter rule list */
3161 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3163 f_entry->fltr_info.fwd_id.hw_vsi_id =
3164 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3166 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3168 mutex_lock(rule_lock);
3169 new_fltr = &f_entry->fltr_info;
/* Rx rules originate from the port; Tx rules from the source VSI. */
3170 if (new_fltr->flag & ICE_FLTR_RX)
3171 new_fltr->src = hw->port_info->lport;
3172 else if (new_fltr->flag & ICE_FLTR_TX)
3173 new_fltr->src = f_entry->fltr_info.fwd_id.hw_vsi_id;
/* No existing rule: drop the lock and create a fresh forwarding rule;
 * otherwise fold the new VSI into the existing rule's VSI list.
 */
3175 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
3177 mutex_unlock(rule_lock);
3178 return ice_create_pkt_fwd_rule(hw, f_entry);
3181 cur_fltr = &m_entry->fltr_info;
3182 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
3183 mutex_unlock(rule_lock);
3189 * ice_remove_vsi_list_rule
3190 * @hw: pointer to the hardware structure
3191 * @vsi_list_id: VSI list ID generated as part of allocate resource
3192 * @lkup_type: switch rule filter lookup type
3194 * The VSI list should be emptied before this function is called to remove the
3198 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
3199 enum ice_sw_lkup_type lkup_type)
3201 struct ice_sw_rule_vsi_list *s_rule;
/* Zero-VSI sized rule: only the header/index are meaningful here. */
3205 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
3206 s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
3210 s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
3211 s_rule->index = cpu_to_le16(vsi_list_id);
3213 /* Free the vsi_list resource that we allocated. It is assumed that the
3214 * list is empty at this point.
3216 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
3217 ice_aqc_opc_free_res);
3219 devm_kfree(ice_hw_to_dev(hw), s_rule);
3224 * ice_rem_update_vsi_list
3225 * @hw: pointer to the hardware structure
3226 * @vsi_handle: VSI handle of the VSI to remove
3227 * @fm_list: filter management entry for which the VSI list management needs to
3231 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
3232 struct ice_fltr_mgmt_list_entry *fm_list)
3234 enum ice_sw_lkup_type lkup_type;
/* Only VSI-list rules with at least one member can be trimmed. */
3238 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
3239 fm_list->vsi_count == 0)
3241 /* A rule with the VSI being removed does not exist */
3243 if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
3246 lkup_type = fm_list->fltr_info.lkup_type;
3247 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
3248 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
3249 ice_aqc_opc_update_sw_rules,
3254 fm_list->vsi_count--;
3255 clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* With exactly one member left (non-VLAN), collapse the VSI-list rule
 * back to a direct FWD_TO_VSI rule targeting the remaining VSI.
 */
3257 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
3258 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
3259 struct ice_vsi_list_map_info *vsi_list_info =
3260 fm_list->vsi_list_info;
3263 rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
3265 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
3268 /* Make sure VSI list is empty before removing it below */
3269 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
3271 ice_aqc_opc_update_sw_rules,
3276 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
3277 tmp_fltr_info.fwd_id.hw_vsi_id =
3278 ice_get_hw_vsi_num(hw, rem_vsi_handle);
3279 tmp_fltr_info.vsi_handle = rem_vsi_handle;
3280 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
3282 ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
3283 tmp_fltr_info.fwd_id.hw_vsi_id, status);
3287 fm_list->fltr_info = tmp_fltr_info;
/* VLAN lists are kept until empty; non-VLAN lists are freed once
 * they drop to a single member (handled above).
 */
3290 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
3291 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
3292 struct ice_vsi_list_map_info *vsi_list_info =
3293 fm_list->vsi_list_info;
3295 /* Remove the VSI list since it is no longer used */
3296 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
3298 ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
3299 vsi_list_id, status);
3303 list_del(&vsi_list_info->list_entry);
3304 devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
3305 fm_list->vsi_list_info = NULL;
3312 * ice_remove_rule_internal - Remove a filter rule of a given type
3313 * @hw: pointer to the hardware structure
3314 * @recp_id: recipe ID for which the rule needs to removed
3315 * @f_entry: rule entry containing filter information
3318 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3319 struct ice_fltr_list_entry *f_entry)
3321 struct ice_switch_info *sw = hw->switch_info;
3322 struct ice_fltr_mgmt_list_entry *list_elem;
3323 struct mutex *rule_lock; /* Lock to protect filter rule list */
3324 bool remove_rule = false;
3328 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
3330 f_entry->fltr_info.fwd_id.hw_vsi_id =
3331 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
3333 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
3334 mutex_lock(rule_lock);
3335 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
/* Decide whether the whole HW rule goes away or only one VSI leaves
 * its VSI list (remove_rule assignments sit on elided lines).
 */
3341 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
3343 } else if (!list_elem->vsi_list_info) {
3346 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3347 /* a ref_cnt > 1 indicates that the vsi_list is being
3348 * shared by multiple rules. Decrement the ref_cnt and
3349 * remove this rule, but do not modify the list, as it
3350 * is in-use by other rules.
3352 list_elem->vsi_list_info->ref_cnt--;
3355 /* a ref_cnt of 1 indicates the vsi_list is only used
3356 * by one rule. However, the original removal request is only
3357 * for a single VSI. Update the vsi_list first, and only
3358 * remove the rule if there are no further VSIs in this list.
3360 vsi_handle = f_entry->fltr_info.vsi_handle;
3361 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3364 /* if VSI count goes to zero after updating the VSI list */
3365 if (list_elem->vsi_count == 0)
3370 /* Remove the lookup rule */
3371 struct ice_sw_rule_lkup_rx_tx *s_rule;
/* Removal rules carry no packet header, hence the NO_HDR size. */
3373 s_rule = devm_kzalloc(ice_hw_to_dev(hw),
3374 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3381 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
3382 ice_aqc_opc_remove_sw_rules);
3384 status = ice_aq_sw_rules(hw, s_rule,
3385 ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
3386 1, ice_aqc_opc_remove_sw_rules, NULL);
3388 /* Remove a book keeping from the list */
3389 devm_kfree(ice_hw_to_dev(hw), s_rule);
3394 list_del(&list_elem->list_entry);
3395 devm_kfree(ice_hw_to_dev(hw), list_elem);
3398 mutex_unlock(rule_lock);
3403 * ice_mac_fltr_exist - does this MAC filter exist for given VSI
3404 * @hw: pointer to the hardware structure
3405 * @mac: MAC address to be checked (for MAC filter)
3406 * @vsi_handle: check MAC filter for this VSI
3408 bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
3410 struct ice_fltr_mgmt_list_entry *entry;
3411 struct list_head *rule_head;
3412 struct ice_switch_info *sw;
3413 struct mutex *rule_lock; /* Lock to protect filter rule list */
3416 if (!ice_is_vsi_valid(hw, vsi_handle))
3419 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3420 sw = hw->switch_info;
3421 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
3425 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3426 mutex_lock(rule_lock);
3427 list_for_each_entry(entry, rule_head, list_entry) {
3428 struct ice_fltr_info *f_info = &entry->fltr_info;
3429 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
/* Skip placeholder entries with an all-zero MAC. */
3431 if (is_zero_ether_addr(mac_addr))
/* Only Tx, VSI-sourced, MAC-lookup FWD_TO_VSI rules targeting this
 * exact HW VSI qualify as a match.
 */
3434 if (f_info->flag != ICE_FLTR_TX ||
3435 f_info->src_id != ICE_SRC_ID_VSI ||
3436 f_info->lkup_type != ICE_SW_LKUP_MAC ||
3437 f_info->fltr_act != ICE_FWD_TO_VSI ||
3438 hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3441 if (ether_addr_equal(mac, mac_addr)) {
3442 mutex_unlock(rule_lock);
3446 mutex_unlock(rule_lock);
3451 * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
3452 * @hw: pointer to the hardware structure
3454 * @vsi_handle: check MAC filter for this VSI
3456 bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
3458 struct ice_fltr_mgmt_list_entry *entry;
3459 struct list_head *rule_head;
3460 struct ice_switch_info *sw;
3461 struct mutex *rule_lock; /* Lock to protect filter rule list */
/* VLAN IDs are 12 bits; anything larger can't exist as a filter. */
3464 if (vlan_id > ICE_MAX_VLAN_ID)
3467 if (!ice_is_vsi_valid(hw, vsi_handle))
3470 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3471 sw = hw->switch_info;
3472 rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3476 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3477 mutex_lock(rule_lock);
3478 list_for_each_entry(entry, rule_head, list_entry) {
3479 struct ice_fltr_info *f_info = &entry->fltr_info;
3480 u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
3481 struct ice_vsi_list_map_info *map_info;
3483 if (entry_vlan_id > ICE_MAX_VLAN_ID)
3486 if (f_info->flag != ICE_FLTR_TX ||
3487 f_info->src_id != ICE_SRC_ID_VSI ||
3488 f_info->lkup_type != ICE_SW_LKUP_VLAN)
3491 /* Only allowed filter action are FWD_TO_VSI/_VSI_LIST */
3492 if (f_info->fltr_act != ICE_FWD_TO_VSI &&
3493 f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
3496 if (f_info->fltr_act == ICE_FWD_TO_VSI) {
3497 if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
3499 } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
3500 /* If filter_action is FWD_TO_VSI_LIST, make sure
3501 * that VSI being checked is part of VSI list
3503 if (entry->vsi_count == 1 &&
3504 entry->vsi_list_info) {
3505 map_info = entry->vsi_list_info;
3506 if (!test_bit(vsi_handle, map_info->vsi_map))
3511 if (vlan_id == entry_vlan_id) {
3512 mutex_unlock(rule_lock);
3516 mutex_unlock(rule_lock);
3522 * ice_add_mac - Add a MAC address based filter rule
3523 * @hw: pointer to the hardware structure
3524 * @m_list: list of MAC addresses and forwarding information
3526 int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
3528 struct ice_fltr_list_entry *m_list_itr;
3534 list_for_each_entry(m_list_itr, m_list, list_entry) {
3535 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
/* All MAC filters are programmed as Tx rules sourced by the VSI. */
3539 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
3540 vsi_handle = m_list_itr->fltr_info.vsi_handle;
3541 if (!ice_is_vsi_valid(hw, vsi_handle))
3543 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3544 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
3545 /* update the src in case it is VSI num */
3546 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
3548 m_list_itr->fltr_info.src = hw_vsi_id;
/* Reject non-MAC lookups and all-zero addresses (error return on an
 * elided line).
 */
3549 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
3550 is_zero_ether_addr(add))
/* Per-entry status is recorded; the first failure aborts the batch. */
3553 m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
3555 if (m_list_itr->status)
3556 return m_list_itr->status;
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * Adds a single VLAN rule under the ICE_SW_LKUP_VLAN recipe lock. If no
 * rule exists yet for the VLAN, a packet-forwarding rule is created (and,
 * for ICE_FWD_TO_VSI actions, converted to forward to a VSI list). If a
 * rule exists, the VSI is either added to the existing VSI list (when the
 * list is referenced by only this rule) or a new two-VSI list is created
 * and the rule re-pointed at it.
 *
 * Return: presumably 0 on success or a negative error code — error paths
 * are elided in this chunk, confirm against full source.
 */
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;
	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
	if (new_fltr->src_id != ICE_SRC_ID_VSI)
	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	mutex_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
				status = ice_create_vsi_list_rule(hw,
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		status = ice_create_pkt_fwd_rule(hw, f_entry);
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
				/* reuse VSI list for new rule and increment ref_cnt */
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule (ref_cnt == 1 — see condition above)
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			find_first_bit(v_list_itr->vsi_list_info->vsi_map,
		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		/* before overriding VSI list map info. decrement ref_cnt of
		 * the old map (it is being replaced below)
		 */
		v_list_itr->vsi_list_info->ref_cnt--;
		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
		v_list_itr->vsi_count++;
	mutex_unlock(rule_lock);
 * ice_add_vlan - Add VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 *
 * Walks @v_list, forces the Tx flag on each VLAN entry, and delegates the
 * actual rule programming to ice_add_vlan_internal(); the per-entry result
 * is stored in the entry's status field.
 *
 * Return: first non-zero entry status on failure.
 */
int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
	struct ice_fltr_list_entry *v_list_itr;
	list_for_each_entry(v_list_itr, v_list, list_entry) {
		/* only ICE_SW_LKUP_VLAN entries are accepted here */
		if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
		v_list_itr->fltr_info.flag = ICE_FLTR_TX;
		v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
 * ice_add_eth_mac - Add ethertype and MAC based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ether type MAC filter, MAC is optional
 *
 * This function requires the caller to populate the entries in
 * the filter list with the necessary fields (including flags to
 * indicate Tx or Rx rules).
 *
 * Return: first non-zero entry status on failure.
 */
int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
	struct ice_fltr_list_entry *em_list_itr;

	if (!em_list || !hw)

	list_for_each_entry(em_list_itr, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		/* only ethertype and ethertype+MAC lookups are valid here */
		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)

		em_list_itr->status = ice_add_rule_internal(hw, l_type,
		if (em_list_itr->status)
			return em_list_itr->status;
 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
 * @hw: pointer to the hardware structure
 * @em_list: list of ethertype or ethertype MAC entries
 *
 * Mirror of ice_add_eth_mac(): removes each valid ethertype /
 * ethertype+MAC rule via ice_remove_rule_internal(). Uses the _safe list
 * iterator since entries may be unlinked during removal.
 *
 * Return: first non-zero entry status on failure.
 */
int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
	struct ice_fltr_list_entry *em_list_itr, *tmp;

	if (!em_list || !hw)

	list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
		enum ice_sw_lkup_type l_type =
			em_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
		    l_type != ICE_SW_LKUP_ETHERTYPE)

		em_list_itr->status = ice_remove_rule_internal(hw, l_type,
		if (em_list_itr->status)
			return em_list_itr->status;
 * ice_rem_sw_rule_info - free all switch rule bookkeeping entries
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Unlinks and frees (devm_kfree) every ice_fltr_mgmt_list_entry on
 * @rule_head. Software bookkeeping only — no hardware rules are touched
 * here.
 */
ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
	if (!list_empty(rule_head)) {
		struct ice_fltr_mgmt_list_entry *entry;
		struct ice_fltr_mgmt_list_entry *tmp;

		/* _safe iterator: entries are deleted while walking */
		list_for_each_entry_safe(entry, tmp, rule_head, list_entry) {
			list_del(&entry->list_entry);
			devm_kfree(ice_hw_to_dev(hw), entry);
 * ice_rem_adv_rule_info - free all advanced rule bookkeeping entries
 * @hw: pointer to the hardware structure
 * @rule_head: pointer to the switch list structure that we want to delete
 *
 * Unlinks and frees every ice_adv_fltr_mgmt_list_entry on @rule_head,
 * including each entry's separately allocated lookup array (->lkups).
 */
ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
	struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
	struct ice_adv_fltr_mgmt_list_entry *lst_itr;

	if (list_empty(rule_head))

	list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
		list_del(&lst_itr->list_entry);
		/* free the lookup array before the entry that owns it */
		devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
		devm_kfree(ice_hw_to_dev(hw), lst_itr);
 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
 * @pi: pointer to the port_info structure
 * @vsi_handle: VSI handle to set as default
 * @set: true to add the above mentioned switch rule, false to remove it
 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
 *
 * add filter rule to set/unset given VSI as default VSI for the switch
 * (represented by swid)
 *
 * Return: result of ice_add_rule_internal()/ice_remove_rule_internal()
 * (0 or negative error code — presumably, tail elided in this chunk).
 */
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info f_info;
	struct ice_hw *hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))

	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&f_info, 0, sizeof(f_info));

	f_info.lkup_type = ICE_SW_LKUP_DFLT;
	f_info.flag = direction;
	f_info.fltr_act = ICE_FWD_TO_VSI;
	f_info.fwd_id.hw_vsi_id = hw_vsi_id;
	f_info.vsi_handle = vsi_handle;

	/* Rx default rules are sourced from the port; Tx from the VSI */
	if (f_info.flag & ICE_FLTR_RX) {
		f_info.src = hw->port_info->lport;
		f_info.src_id = ICE_SRC_ID_LPORT;
	} else if (f_info.flag & ICE_FLTR_TX) {
		f_info.src_id = ICE_SRC_ID_VSI;
		f_info.src = hw_vsi_id;
	f_list_entry.fltr_info = f_info;

		status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
		status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
 * @fm_entry: filter entry to inspect
 * @vsi_handle: VSI handle to compare with filter info
 *
 * A VSI "uses" a filter either directly (ICE_FWD_TO_VSI with a matching
 * handle) or indirectly (ICE_FWD_TO_VSI_LIST with the handle's bit set in
 * the attached VSI-list map).
 */
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
	return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
		 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
		(fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
		 fm_entry->vsi_list_info &&
		 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
 * ice_check_if_dflt_vsi - check if VSI is default VSI
 * @pi: pointer to the port_info structure
 * @vsi_handle: vsi handle to check for in filter list
 * @rule_exists: indicates if there are any VSI's in the rule list
 *
 * checks if the VSI is in a default VSI list, and also indicates
 * if the default VSI list is empty
 */
ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
	struct ice_fltr_mgmt_list_entry *fm_entry;
	struct ice_sw_recipe *recp_list;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
	rule_lock = &recp_list->filt_rule_lock;
	rule_head = &recp_list->filt_rules;

	mutex_lock(rule_lock);

	/* @rule_exists is optional — only report when the caller asked */
	if (rule_exists && !list_empty(rule_head))
		*rule_exists = true;

	list_for_each_entry(fm_entry, rule_head, list_entry) {
		if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {

	mutex_unlock(rule_lock);
 * ice_remove_mac - remove a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * This function removes either a MAC filter rule or a specific VSI from a
 * VSI list for a multicast MAC address.
 *
 * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
 * be aware that this call will only work if all the entries passed into m_list
 * were added previously. It will not attempt to do a partial remove of entries
 * that failed.
 */
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
	struct ice_fltr_list_entry *list_itr, *tmp;

	list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
		enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_MAC)

		vsi_handle = list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))

		/* translate SW handle to HW VSI number before removal */
		list_itr->fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);

		list_itr->status = ice_remove_rule_internal(hw,
		if (list_itr->status)
			return list_itr->status;
 * ice_remove_vlan - Remove VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @v_list: list of VLAN entries and forwarding information
 *
 * Removes each ICE_SW_LKUP_VLAN entry via ice_remove_rule_internal(),
 * stopping at the first failure.
 *
 * Return: first non-zero entry status on failure.
 */
int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;

		if (l_type != ICE_SW_LKUP_VLAN)

		v_list_itr->status = ice_remove_rule_internal(hw,
		if (v_list_itr->status)
			return v_list_itr->status;
 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @vsi_list_head: pointer to the list to add entry to
 * @fi: pointer to fltr_info of filter entry to copy & add
 *
 * Helper function, used when creating a list of filters to remove from
 * a specific VSI. The entry added to vsi_list_head is a COPY of the
 * original filter entry, with the exception of fltr_info.fltr_act and
 * fltr_info.fwd_id fields. These are set such that later logic can
 * extract which VSI to remove the fltr from, and pass on that information.
 */
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			       struct list_head *vsi_list_head,
			       struct ice_fltr_info *fi)
	struct ice_fltr_list_entry *tmp;

	/* this memory is freed up in the caller function
	 * once filters for this VSI are removed
	 */
	tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);

	tmp->fltr_info = *fi;

	/* Overwrite these fields to indicate which VSI to remove filter from,
	 * so find and remove logic can extract the information from the
	 * list entries. Note that original entries will still have proper
	 * values (untouched — only the copy is rewritten).
	 */
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi_handle;
	tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	list_add(&tmp->list_entry, vsi_list_head);
 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup_list_head: pointer to the list that has certain lookup type filters
 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
 *
 * Locates all filters in lkup_list_head that are used by the given VSI,
 * and adds COPIES of those entries to vsi_list_head (intended to be used
 * to remove the listed filters).
 * Note that this means all entries in vsi_list_head must be explicitly
 * deallocated by the caller when done with list.
 */
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
			 struct list_head *lkup_list_head,
			 struct list_head *vsi_list_head)
	struct ice_fltr_mgmt_list_entry *fm_entry;

	/* check to make sure VSI ID is valid and within boundary */
	if (!ice_is_vsi_valid(hw, vsi_handle))

	list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
		/* skip filters the VSI neither owns nor is listed in */
		if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
							&fm_entry->fltr_info);
 * ice_determine_promisc_mask - map a filter to its ICE_PROMISC_* bits
 * @fi: filter info to parse
 *
 * Helper function to determine which ICE_PROMISC_ mask corresponds
 * to given filter into.
 *
 * Classification is by destination MAC (broadcast / multicast / unicast)
 * crossed with direction (Tx when fi->flag == ICE_FLTR_TX, else Rx).
 */
static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
	u16 vid = fi->l_data.mac_vlan.vlan_id;
	u8 *macaddr = fi->l_data.mac.mac_addr;
	bool is_tx_fltr = false;
	u8 promisc_mask = 0;

	if (fi->flag == ICE_FLTR_TX)

	if (is_broadcast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
	else if (is_multicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
	else if (is_unicast_ether_addr(macaddr))
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
		/* VLAN promisc bit — presumably gated on a non-zero vid;
		 * the guard line is elided in this chunk, confirm
		 */
		promisc_mask |= is_tx_fltr ?
			ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;

	return promisc_mask;
 * ice_remove_promisc - Remove promisc based filter rules
 * @hw: pointer to the hardware structure
 * @recp_id: recipe ID for which the rule needs to removed
 * @v_list: list of promisc entries
 *
 * Return: first non-zero entry status on failure.
 */
ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
	struct ice_fltr_list_entry *v_list_itr, *tmp;

	list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
		v_list_itr->status =
			ice_remove_rule_internal(hw, recp_id, v_list_itr);
		if (v_list_itr->status)
			return v_list_itr->status;
 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to clear mode
 * @promisc_mask: mask of promiscuous config bits to clear
 * @vid: VLAN ID to clear VLAN promiscuous
 *
 * Builds a temporary removal list of the VSI's promisc rules that are
 * fully covered by @promisc_mask, removes them, then frees the copies.
 */
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry, *tmp;
	struct list_head remove_list_head;
	struct ice_fltr_mgmt_list_entry *itr;
	struct list_head *rule_head;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	if (!ice_is_vsi_valid(hw, vsi_handle))

	/* VLAN promisc bits use a distinct recipe from plain promisc */
	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		recipe_id = ICE_SW_LKUP_PROMISC;

	rule_head = &sw->recp_list[recipe_id].filt_rules;
	rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;

	INIT_LIST_HEAD(&remove_list_head);

	mutex_lock(rule_lock);
	list_for_each_entry(itr, rule_head, list_entry) {
		struct ice_fltr_info *fltr_info;
		u8 fltr_promisc_mask = 0;

		if (!ice_vsi_uses_fltr(itr, vsi_handle))
		fltr_info = &itr->fltr_info;

		if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
		    vid != fltr_info->l_data.mac_vlan.vlan_id)

		fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);

		/* Skip if filter is not completely specified by given mask */
		if (fltr_promisc_mask & ~promisc_mask)

		status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
			mutex_unlock(rule_lock);
			goto free_fltr_list;
	mutex_unlock(rule_lock);

	status = ice_remove_promisc(hw, recipe_id, &remove_list_head);

	/* free the COPIES built by ice_add_entry_to_vsi_fltr_list() */
	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @vid: VLAN ID to set VLAN promiscuous
 *
 * One switch rule is programmed per direction/packet-type bit present in
 * @promisc_mask; each loop iteration consumes (clears) one bit.
 */
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
	enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
	struct ice_fltr_list_entry f_list_entry;
	struct ice_fltr_info new_fltr;

	if (!ice_is_vsi_valid(hw, vsi_handle))
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	memset(&new_fltr, 0, sizeof(new_fltr));

	if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.l_data.mac_vlan.vlan_id = vid;
		recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
		new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
		recipe_id = ICE_SW_LKUP_PROMISC;

	/* Separate filters must be set for each direction/packet type
	 * combination, so we will loop over the mask value, store the
	 * individual type, and clear it out in the input mask as it
	 * is found.
	 */
	while (promisc_mask) {

		if (promisc_mask & ICE_PROMISC_UCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_RX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_UCAST_TX;
			pkt_type = UCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_RX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_MCAST_TX;
			pkt_type = MCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_RX;
			pkt_type = BCAST_FLTR;
		} else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
			promisc_mask &= ~ICE_PROMISC_BCAST_TX;
			pkt_type = BCAST_FLTR;

		/* Check for VLAN promiscuous flag */
		if (promisc_mask & ICE_PROMISC_VLAN_RX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_RX;
		} else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
			promisc_mask &= ~ICE_PROMISC_VLAN_TX;

		/* Set filter DA based on packet type */
		mac_addr = new_fltr.l_data.mac.mac_addr;
		if (pkt_type == BCAST_FLTR) {
			eth_broadcast_addr(mac_addr);
		} else if (pkt_type == MCAST_FLTR ||
			   pkt_type == UCAST_FLTR) {
			/* Use the dummy ether header DA */
			ether_addr_copy(mac_addr, dummy_eth_header);
			if (pkt_type == MCAST_FLTR)
				mac_addr[0] |= 0x1; /* Set multicast bit */

		/* Need to reset this to zero for all iterations */
			new_fltr.flag |= ICE_FLTR_TX;
			new_fltr.src = hw_vsi_id;
			new_fltr.flag |= ICE_FLTR_RX;
			new_fltr.src = hw->port_info->lport;

		new_fltr.fltr_act = ICE_FWD_TO_VSI;
		new_fltr.vsi_handle = vsi_handle;
		new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
		f_list_entry.fltr_info = new_fltr;

		status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
			goto set_promisc_exit;
 * ice_set_vlan_vsi_promisc - set promisc mode across a VSI's VLANs
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 */
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct list_head vsi_list_head;
	struct list_head *vlan_head;
	struct mutex *vlan_lock; /* Lock to protect filter rule list */

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* snapshot the VSI's VLAN filters under the lock, then operate on
	 * the copies outside it
	 */
	mutex_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
	mutex_unlock(vlan_lock);
		goto free_fltr_list;

	list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
		/* Avoid enabling or disabling VLAN zero twice when in double
		 * VLAN mode
		 */
		if (ice_is_dvm_ena(hw) &&
		    list_itr->fltr_info.l_data.vlan.tpid == 0)

		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		/* -EEXIST just means the rule is already present */
		if (status && status != -EEXIST)

	list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
		list_del(&list_itr->list_entry);
		devm_kfree(ice_hw_to_dev(hw), list_itr);
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 *
 * Collects copies of all @lkup-type filters used by the VSI, dispatches to
 * the matching remove helper, then frees the copies.
 */
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct list_head remove_list_head;
	struct list_head *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct mutex *rule_lock; /* Lock to protect filter rule list */

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	mutex_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
	mutex_unlock(rule_lock);
		goto free_fltr_list;

	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
	case ICE_SW_LKUP_MAC_VLAN:
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
	case ICE_SW_LKUP_DFLT:
	case ICE_SW_LKUP_LAST:
		/* no remove helper wired up for these lookup types */
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);

	list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
		list_del(&fm_entry->list_entry);
		devm_kfree(ice_hw_to_dev(hw), fm_entry);
 * ice_remove_vsi_fltr - Remove all filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 *
 * Sweeps every supported lookup type, one ice_remove_vsi_lkup_fltr() call
 * per recipe.
 */
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
	ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
 * ice_alloc_res_cntr - allocating resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries requested for FD resource type
 * @counter_id: counter index returned by AQ call
 *
 * Sends an allocate-resource admin queue command and returns the counter
 * index chosen by firmware in *@counter_id.
 */
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;

	/* Allocate resource */
	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);

	/* firmware reports the allocated counter index in sw_resp */
	*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
 * ice_free_res_cntr - free resource counter
 * @hw: pointer to the hardware structure
 * @type: type of resource
 * @alloc_shared: if set it is shared else dedicated
 * @num_items: number of entries to be freed for FD resource type
 * @counter_id: counter ID resource which needs to be freed
 *
 * Mirror of ice_alloc_res_cntr(): sends a free-resource admin queue
 * command for the given counter ID. Failure is logged via ice_debug only.
 */
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
	struct ice_aqc_alloc_free_res_elem *buf;

	buf_len = struct_size(buf, elem, 1);
	buf = kzalloc(buf_len, GFP_KERNEL);

	buf->num_elems = cpu_to_le16(num_items);
	buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
				     ICE_AQC_RES_TYPE_M) | alloc_shared);
	/* tell firmware which counter to release */
	buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
/* Build one ice_prot_ext table entry: protocol id plus its word offsets */
#define ICE_PROTOCOL_ENTRY(id, ...) { \
	.offs = {__VA_ARGS__}, \

/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
	ICE_PROTOCOL_ENTRY(ICE_MAC_OFOS, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_MAC_IL, 0, 2, 4, 6, 8, 10, 12),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_OL, 0),
	ICE_PROTOCOL_ENTRY(ICE_ETYPE_IL, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_OFOS, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV4_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_OFOS, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,
			   20, 22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_IPV6_IL, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
			   22, 24, 26, 28, 30, 32, 34, 36, 38),
	ICE_PROTOCOL_ENTRY(ICE_TCP_IL, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_OF, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_UDP_ILOS, 0, 2),
	ICE_PROTOCOL_ENTRY(ICE_VXLAN, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_GENEVE, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
	ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
	ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
	ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
	ICE_PROTOCOL_ENTRY(ICE_VLAN_IN, 2, 0),
	/* metadata words live at MDID offsets, not packet-header offsets */
	ICE_PROTOCOL_ENTRY(ICE_HW_METADATA,
			   ICE_SOURCE_PORT_MDID_OFFSET,
			   ICE_PTYPE_MDID_OFFSET,
			   ICE_PACKET_LENGTH_MDID_OFFSET,
			   ICE_SOURCE_VSI_MDID_OFFSET,
			   ICE_PKT_VLAN_MDID_OFFSET,
			   ICE_PKT_TUNNEL_MDID_OFFSET,
			   ICE_PKT_TCP_MDID_OFFSET,
			   ICE_PKT_ERROR_MDID_OFFSET),
/* Maps each software protocol type to the hardware protocol ID programmed
 * into recipes. Non-const: the ICE_VLAN_OFOS entry is patched at runtime
 * by ice_change_proto_id_to_dvm() when double VLAN mode is enabled.
 */
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_ETYPE_OL,		ICE_ETYPE_OL_HW },
	{ ICE_ETYPE_IL,		ICE_ETYPE_IL_HW },
	{ ICE_VLAN_OFOS,	ICE_VLAN_OL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_OF,		ICE_UDP_OF_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	/* UDP-based tunnels share the UDP-of HW protocol ID */
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_GTP,		ICE_UDP_OF_HW },
	{ ICE_GTP_NO_PAY,	ICE_UDP_ILOS_HW },
	{ ICE_PPPOE,		ICE_PPPOE_HW },
	{ ICE_L2TPV3,		ICE_L2TPV3_HW },
	{ ICE_VLAN_EX,		ICE_VLAN_OF_HW },
	{ ICE_VLAN_IN,		ICE_VLAN_OL_HW },
	{ ICE_HW_METADATA,	ICE_META_DATA_ID_HW },
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 * @tun_type: type of recipe tunnel
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 *
 * A match requires the same number of valid words, every lookup word
 * (offset/prot_id, plus masks — comparison partly elided in this chunk)
 * present among the recipe's words, and an equal tunnel type.
 */
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
	      enum ice_sw_tunnel_type tun_type)
	bool refresh_required = true;
	struct ice_sw_recipe *recp;

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i,

		/* Skip inverse action recipes */
		if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
		    ICE_AQ_RECIPE_ACT_INV_ACT)

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
			struct ice_fv_word *be = lkup_exts->fv_words;
			u16 *cr = recp[i].lkup_exts.field_mask;
			u16 *de = lkup_exts->field_mask;

			/* ar, cr, and qr are related to the recipe words, while
			 * be, de, and pe are related to the lookup words
			 */
			for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
				for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
					if (ar[qr].off == be[pe].off &&
					    ar[qr].prot_id == be[pe].prot_id &&
						/* Found the "pe"th word in the
						 * given recipe
						 */
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (qr >= recp[i].lkup_exts.n_val_words) {

			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 * Also tun type of recipe needs to be checked
			 */
			if (found && recp[i].tun_type == tun_type)
				return i; /* Return the recipe ID */

	return ICE_MAX_NUM_RECIPES;
 * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
 *
 * As protocol id for outer vlan is different in dvm and svm, if dvm is
 * supported protocol array record for outer vlan has to be modified to
 * reflect the value proper for DVM.
 *
 * Idempotent: the != ICE_VLAN_OF_HW check makes repeat calls no-ops.
 */
void ice_change_proto_id_to_dvm(void)
	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
		    ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
			ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
 * ice_prot_type_to_id - get protocol ID from protocol type
 * @type: protocol type
 * @id: pointer to variable that will receive the ID
 *
 * Linear search of ice_prot_id_tbl; *@id is written only on a hit.
 *
 * Returns true if found, false otherwise
 */
static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
	for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
		if (ice_prot_id_tbl[i].type == type) {
			*id = ice_prot_id_tbl[i].protocol_id;
 * ice_fill_valid_words - count valid words
 * @rule: advanced rule with lookup information
 * @lkup_exts: byte offset extractions of the words that are valid
 *
 * calculate valid words in a lookup rule using mask value
 *
 * For every non-zero 16-bit mask word in @rule, appends the corresponding
 * offset/protocol-ID/mask to @lkup_exts (bounded by ICE_MAX_CHAIN_WORDS).
 *
 * Return: number of words added on this call (new total minus old total).
 */
ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
		     struct ice_prot_lkup_ext *lkup_exts)
	u8 j, word, prot_id, ret_val;

	if (!ice_prot_type_to_id(rule->type, &prot_id))

	/* continue appending after any words already recorded */
	word = lkup_exts->n_val_words;

	for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
		if (((u16 *)&rule->m_u)[j] &&
		    rule->type < ARRAY_SIZE(ice_prot_ext)) {
			/* No more space to accommodate */
			if (word >= ICE_MAX_CHAIN_WORDS)
			lkup_exts->fv_words[word].off =
				ice_prot_ext[rule->type].offs[j];
			lkup_exts->fv_words[word].prot_id =
				ice_prot_id_tbl[rule->type].protocol_id;
			lkup_exts->field_mask[word] =
				be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);

	ret_val = word - lkup_exts->n_val_words;
	lkup_exts->n_val_words = word;
4768 * ice_create_first_fit_recp_def - Create a recipe grouping
4769 * @hw: pointer to the hardware structure
4770 * @lkup_exts: an array of protocol header extractions
4771 * @rg_list: pointer to a list that stores new recipe groups
4772 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4774 * Using first fit algorithm, take all the words that are still not done
4775 * and start grouping them in 4-word groups. Each group makes up one
4779 ice_create_first_fit_recp_def(struct ice_hw *hw,
4780 struct ice_prot_lkup_ext *lkup_exts,
4781 struct list_head *rg_list,
4784 struct ice_pref_recipe_group *grp = NULL;
4789 /* Walk through every word in the rule to check if it is not done. If so
4790 * then this word needs to be part of a new recipe.
4792 for (j = 0; j < lkup_exts->n_val_words; j++)
4793 if (!test_bit(j, lkup_exts->done)) {
/* Current group missing or full (ICE_NUM_WORDS_RECIPE pairs):
 * allocate a fresh ice_recp_grp_entry, link it onto rg_list and
 * continue filling its embedded r_group.
 */
4795 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4796 struct ice_recp_grp_entry *entry;
4798 entry = devm_kzalloc(ice_hw_to_dev(hw),
4803 list_add(&entry->l_entry, rg_list);
4804 grp = &entry->r_group;
/* Copy this word's protocol/offset pair and field mask into the
 * next free slot of the current group.
 */
4808 grp->pairs[grp->n_val_pairs].prot_id =
4809 lkup_exts->fv_words[j].prot_id;
4810 grp->pairs[grp->n_val_pairs].off =
4811 lkup_exts->fv_words[j].off;
4812 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4820 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4821 * @hw: pointer to the hardware structure
4822 * @fv_list: field vector with the extraction sequence information
4823 * @rg_list: recipe groupings with protocol-offset pairs
4825 * Helper function to fill in the field vector indices for protocol-offset
4826 * pairs. These indexes are then ultimately programmed into a recipe.
4829 ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
4830 struct list_head *rg_list)
4832 struct ice_sw_fv_list_entry *fv;
4833 struct ice_recp_grp_entry *rg;
4834 struct ice_fv_word *fv_ext;
/* No field vectors at all -> nothing to index against. */
4836 if (list_empty(fv_list))
/* Per the caller's comment elsewhere: pick the first FV in the list and
 * use its extraction-word array as the reference sequence.
 */
4839 fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
4841 fv_ext = fv->fv_ptr->ew;
4843 list_for_each_entry(rg, rg_list, l_entry) {
4846 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4847 struct ice_fv_word *pr;
4852 pr = &rg->r_group.pairs[i];
4853 mask = rg->r_group.mask[i];
/* Search the FV's extraction words (hw->blk[ICE_BLK_SW].es.fvw
 * of them) for this protocol/offset pair; on a hit, store the FV
 * index and the mask into the recipe-group entry.
 */
4855 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4856 if (fv_ext[j].prot_id == pr->prot_id &&
4857 fv_ext[j].off == pr->off) {
4860 /* Store index of field vector */
4862 rg->fv_mask[i] = mask;
4866 /* Protocol/offset could not be found, caller gave an
4878 * ice_find_free_recp_res_idx - find free result indexes for recipe
4879 * @hw: pointer to hardware structure
4880 * @profiles: bitmap of profiles that will be associated with the new recipe
4881 * @free_idx: pointer to variable to receive the free index bitmap
4883 * The algorithm used here is:
4884 * 1. When creating a new recipe, create a set P which contains all
4885 * Profiles that will be associated with our new recipe
4887 * 2. For each Profile p in set P:
4888 * a. Add all recipes associated with Profile p into set R
4889 * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
4890 * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
4891 * i. Or just assume they all have the same possible indexes:
4893 * i.e., PossibleIndexes = 0x0000F00000000000
4895 * 3. For each Recipe r in set R:
4896 * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
4897 * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
4899 * FreeIndexes will contain the bits indicating the indexes free for use,
4900 * then the code needs to update the recipe[r].used_result_idx_bits to
4901 * indicate which indexes were selected for use by this recipe.
4904 ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
4905 unsigned long *free_idx)
4907 DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
4908 DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
4909 DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
/* Start from "no recipes, no used indexes, every index possible" and
 * narrow down from there.
 */
4912 bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
4913 bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
4915 bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
4917 /* For each profile we are going to associate the recipe with, add the
4918 * recipes that are associated with that profile. This will give us
4919 * the set of recipes that our recipe may collide with. Also, determine
4920 * what possible result indexes are usable given this set of profiles.
4922 for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
4923 bitmap_or(recipes, recipes, profile_to_recipe[bit],
4924 ICE_MAX_NUM_RECIPES);
4925 bitmap_and(possible_idx, possible_idx,
4926 hw->switch_info->prof_res_bm[bit],
4930 /* For each recipe that our new recipe may collide with, determine
4931 * which indexes have been used.
4933 for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
4934 bitmap_or(used_idx, used_idx,
4935 hw->switch_info->recp_list[bit].res_idxs,
/* free = possible XOR used; used is a subset of possible here, so the
 * XOR leaves exactly the possible-but-unused indexes set.
 */
4938 bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
4940 /* return number of free indexes */
4941 return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
4945 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4946 * @hw: pointer to hardware structure
4947 * @rm: recipe management list entry
4948 * @profiles: bitmap of profiles that will be associated.
4951 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4952 unsigned long *profiles)
4954 DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
4955 struct ice_aqc_recipe_data_elem *tmp;
4956 struct ice_aqc_recipe_data_elem *buf;
4957 struct ice_recp_grp_entry *entry;
4964 /* When more than one recipe are required, another recipe is needed to
4965 * chain them together. Matching a tunnel metadata ID takes up one of
4966 * the match fields in the chaining recipe reducing the number of
4967 * chained recipes by one.
4969 /* check number of free result indices */
4970 bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
4971 free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
4973 ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
4974 free_res_idx, rm->n_grp_count);
/* Chained ("big") recipes need one free result index per sub-recipe and
 * must fit within ICE_MAX_CHAIN_RECIPE; otherwise the request fails.
 */
4976 if (rm->n_grp_count > 1) {
4977 if (rm->n_grp_count > free_res_idx)
4983 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
/* tmp: scratch buffer for the AQ "get recipe" read; buf: the recipe
 * elements that will actually be programmed (one per group).
 */
4986 tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
4990 buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
4997 bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4998 recipe_count = ICE_MAX_NUM_RECIPES;
4999 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
5001 if (status || recipe_count == 0)
5004 /* Allocate the recipe resources, and configure them according to the
5005 * match fields from protocol headers and extracted field vectors.
5007 chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
5008 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5011 status = ice_alloc_recipe(hw, &entry->rid);
5015 /* Clear the result index of the located recipe, as this will be
5016 * updated, if needed, later in the recipe creation process.
5018 tmp[0].content.result_indx = 0;
/* Seed this recipe element from the template read back via AQ. */
5020 buf[recps] = tmp[0];
5021 buf[recps].recipe_indx = (u8)entry->rid;
5022 /* if the recipe is a non-root recipe RID should be programmed
5023 * as 0 for the rules to be applied correctly.
5025 buf[recps].content.rid = 0;
5026 memset(&buf[recps].content.lkup_indx, 0,
5027 sizeof(buf[recps].content.lkup_indx));
5029 /* All recipes use look-up index 0 to match switch ID. */
5030 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5031 buf[recps].content.mask[0] =
5032 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5033 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
5036 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5037 buf[recps].content.lkup_indx[i] = 0x80;
5038 buf[recps].content.mask[i] = 0;
/* Overwrite the ignore defaults with the real FV indexes/masks
 * resolved earlier (index 0 stays reserved for switch ID).
 */
5041 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
5042 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
5043 buf[recps].content.mask[i + 1] =
5044 cpu_to_le16(entry->fv_mask[i]);
5047 if (rm->n_grp_count > 1) {
5048 /* Checks to see if there really is a valid result index
5051 if (chain_idx >= ICE_MAX_FV_WORDS) {
5052 ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
/* Hand this sub-recipe a result index so the chaining recipe can
 * match on its outcome; consume the index and fetch the next one.
 */
5057 entry->chain_idx = chain_idx;
5058 buf[recps].content.result_indx =
5059 ICE_AQ_RECIPE_RESULT_EN |
5060 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
5061 ICE_AQ_RECIPE_RESULT_DATA_M);
5062 clear_bit(chain_idx, result_idx_bm);
5063 chain_idx = find_first_bit(result_idx_bm,
5067 /* fill recipe dependencies */
5068 bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
5069 ICE_MAX_NUM_RECIPES);
5070 set_bit(buf[recps].recipe_indx,
5071 (unsigned long *)buf[recps].recipe_bitmap);
5072 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
/* Single-group case: the lone recipe is the root; mark it as such. */
5076 if (rm->n_grp_count == 1) {
5077 rm->root_rid = buf[0].recipe_indx;
5078 set_bit(buf[0].recipe_indx, rm->r_bitmap);
5079 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
5080 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
5081 memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
5082 sizeof(buf[0].recipe_bitmap));
5087 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
5088 * the recipe which is getting created if specified
5089 * by user. Usually any advanced switch filter, which results
5090 * into new extraction sequence, ended up creating a new recipe
5091 * of type ROOT and usually recipes are associated with profiles
5092 * Switch rule referreing newly created recipe, needs to have
5093 * either/or 'fwd' or 'join' priority, otherwise switch rule
5094 * evaluation will not happen correctly. In other words, if
5095 * switch rule to be evaluated on priority basis, then recipe
5096 * needs to have priority, otherwise it will be evaluated last.
5098 buf[0].content.act_ctrl_fwd_priority = rm->priority;
/* Multi-group case: build one extra root recipe that matches the chain
 * result indexes of all the sub-recipes created above.
 */
5100 struct ice_recp_grp_entry *last_chain_entry;
5103 /* Allocate the last recipe that will chain the outcomes of the
5104 * other recipes together
5106 status = ice_alloc_recipe(hw, &rid);
5110 buf[recps].recipe_indx = (u8)rid;
5111 buf[recps].content.rid = (u8)rid;
5112 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
5113 /* the new entry created should also be part of rg_list to
5114 * make sure we have complete recipe
5116 last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
5117 sizeof(*last_chain_entry),
5119 if (!last_chain_entry) {
5123 last_chain_entry->rid = rid;
5124 memset(&buf[recps].content.lkup_indx, 0,
5125 sizeof(buf[recps].content.lkup_indx));
5126 /* All recipes use look-up index 0 to match switch ID. */
5127 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
5128 buf[recps].content.mask[0] =
5129 cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
5130 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
5131 buf[recps].content.lkup_indx[i] =
5132 ICE_AQ_RECIPE_LKUP_IGNORE;
5133 buf[recps].content.mask[i] = 0;
5137 /* update r_bitmap with the recp that is used for chaining */
5138 set_bit(rid, rm->r_bitmap);
5139 /* this is the recipe that chains all the other recipes so it
5140 * should not have a chaining ID to indicate the same
5142 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* The root matches each sub-recipe's result word with a full 0xFFFF
 * mask; i carries over across iterations to fill successive slots.
 */
5143 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5144 last_chain_entry->fv_idx[i] = entry->chain_idx;
5145 buf[recps].content.lkup_indx[i] = entry->chain_idx;
5146 buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
5147 set_bit(entry->rid, rm->r_bitmap);
5149 list_add(&last_chain_entry->l_entry, &rm->rg_list);
5150 if (sizeof(buf[recps].recipe_bitmap) >=
5151 sizeof(rm->r_bitmap)) {
5152 memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
5153 sizeof(buf[recps].recipe_bitmap));
5158 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
5161 rm->root_rid = (u8)rid;
/* Program all recipe elements in one AQ call under the change lock. */
5163 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5167 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
5168 ice_release_change_lock(hw);
5172 /* Every recipe that just got created add it to the recipe
5175 list_for_each_entry(entry, &rm->rg_list, l_entry) {
5176 struct ice_switch_info *sw = hw->switch_info;
5177 bool is_root, idx_found = false;
5178 struct ice_sw_recipe *recp;
5179 u16 idx, buf_idx = 0;
5181 /* find buffer index for copying some data */
5182 for (idx = 0; idx < rm->n_grp_count; idx++)
5183 if (buf[idx].recipe_indx == entry->rid) {
/* Mirror the programmed recipe into the software recp_list cache so
 * later rule adds / lookups can find it without another AQ read.
 */
5193 recp = &sw->recp_list[entry->rid];
5194 is_root = (rm->root_rid == entry->rid);
5195 recp->is_root = is_root;
5197 recp->root_rid = entry->rid;
5198 recp->big_recp = (is_root && rm->n_grp_count > 1);
5200 memcpy(&recp->ext_words, entry->r_group.pairs,
5201 entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
5203 memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
5204 sizeof(recp->r_bitmap));
5206 /* Copy non-result fv index values and masks to recipe. This
5207 * call will also update the result recipe bitmask.
5209 ice_collect_result_idx(&buf[buf_idx], recp);
5211 /* for non-root recipes, also copy to the root, this allows
5212 * easier matching of a complete chained recipe
5215 ice_collect_result_idx(&buf[buf_idx],
5216 &sw->recp_list[rm->root_rid]);
5218 recp->n_ext_words = entry->r_group.n_val_pairs;
5219 recp->chain_idx = entry->chain_idx;
5220 recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
5221 recp->n_grp_count = rm->n_grp_count;
5222 recp->tun_type = rm->tun_type;
5223 recp->recp_created = true;
5232 devm_kfree(ice_hw_to_dev(hw), buf);
5237 * ice_create_recipe_group - creates recipe group
5238 * @hw: pointer to hardware structure
5239 * @rm: recipe management list entry
5240 * @lkup_exts: lookup elements
5243 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
5244 struct ice_prot_lkup_ext *lkup_exts)
5249 rm->n_grp_count = 0;
5251 /* Create recipes for words that are marked not done by packing them
5254 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5255 &rm->rg_list, &recp_count);
/* Record how many groups were produced and snapshot the extraction
 * words and masks into the recipe-management entry.
 */
5257 rm->n_grp_count += recp_count;
5258 rm->n_ext_words = lkup_exts->n_val_words;
5259 memcpy(&rm->ext_words, lkup_exts->fv_words,
5260 sizeof(rm->ext_words));
5261 memcpy(rm->word_masks, lkup_exts->field_mask,
5262 sizeof(rm->word_masks));
5268 /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
5269 * @hw: pointer to hardware structure
5270 * @rinfo: other information regarding the rule e.g. priority and action info
5271 * @bm: pointer to memory for returning the bitmap of field vectors
5274 ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
5277 enum ice_prof_type prof_type;
5279 bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
/* Map the rule's tunnel type onto a profile class; the fall-through
 * default (with ICE_SW_TUN_AND_NON_TUN) selects ICE_PROF_ALL.
 */
5281 switch (rinfo->tun_type) {
5283 prof_type = ICE_PROF_NON_TUN;
5285 case ICE_ALL_TUNNELS:
5286 prof_type = ICE_PROF_TUN_ALL;
5288 case ICE_SW_TUN_GENEVE:
5289 case ICE_SW_TUN_VXLAN:
5290 prof_type = ICE_PROF_TUN_UDP;
5292 case ICE_SW_TUN_NVGRE:
5293 prof_type = ICE_PROF_TUN_GRE;
5295 case ICE_SW_TUN_GTPU:
5296 prof_type = ICE_PROF_TUN_GTPU;
5298 case ICE_SW_TUN_GTPC:
5299 prof_type = ICE_PROF_TUN_GTPC;
5301 case ICE_SW_TUN_AND_NON_TUN:
5303 prof_type = ICE_PROF_ALL;
5307 ice_get_sw_fv_bitmap(hw, prof_type, bm);
5311 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5312 * @hw: pointer to hardware structure
5313 * @lkups: lookup elements or match criteria for the advanced recipe, one
5314 * structure per protocol header
5315 * @lkups_cnt: number of protocols
5316 * @rinfo: other information regarding the rule e.g. priority and action info
5317 * @rid: return the recipe ID of the recipe created
5320 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5321 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5323 DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
5324 DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
5325 struct ice_prot_lkup_ext *lkup_exts;
5326 struct ice_recp_grp_entry *r_entry;
5327 struct ice_sw_fv_list_entry *fvit;
5328 struct ice_recp_grp_entry *r_tmp;
5329 struct ice_sw_fv_list_entry *tmp;
5330 struct ice_sw_recipe *rm;
5337 lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
5341 /* Determine the number of words to be matched and if it exceeds a
5342 * recipe's restrictions
5344 for (i = 0; i < lkups_cnt; i++) {
5347 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5349 goto err_free_lkup_exts;
/* Accumulate valid match words from each lookup element. */
5352 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5355 goto err_free_lkup_exts;
5359 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
5362 goto err_free_lkup_exts;
5365 /* Get field vectors that contain fields extracted from all the protocol
5366 * headers being programmed.
5368 INIT_LIST_HEAD(&rm->fv_list);
5369 INIT_LIST_HEAD(&rm->rg_list);
5371 /* Get bitmap of field vectors (profiles) that are compatible with the
5372 * rule request; only these will be searched in the subsequent call to
5373 * ice_get_sw_fv_list.
5375 ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
5377 status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
5381 /* Group match words into recipes using preferred recipe grouping
5384 status = ice_create_recipe_group(hw, rm, lkup_exts);
5388 /* set the recipe priority if specified */
5389 rm->priority = (u8)rinfo->priority;
5391 /* Find offsets from the field vector. Pick the first one for all the
5394 status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5398 /* get bitmap of all profiles the recipe will be associated with */
5399 bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
5400 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5401 ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
5402 set_bit((u16)fvit->profile_id, profiles);
5405 /* Look for a recipe which matches our requested fv / mask list */
5406 *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
5407 if (*rid < ICE_MAX_NUM_RECIPES)
5408 /* Success if found a recipe that match the existing criteria */
/* No existing recipe fits -> create one from the groups built above. */
5411 rm->tun_type = rinfo->tun_type;
5412 /* Recipe we need does not exist, add a recipe */
5413 status = ice_add_sw_recipe(hw, rm, profiles);
5417 /* Associate all the recipes created with all the profiles in the
5418 * common field vector.
5420 list_for_each_entry(fvit, &rm->fv_list, list_entry) {
5421 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
/* Read-modify-write the profile's recipe association bitmap via
 * AQ, OR-ing in the recipes just created, under the change lock.
 */
5424 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5425 (u8 *)r_bitmap, NULL);
5429 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
5430 ICE_MAX_NUM_RECIPES);
5431 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5435 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5438 ice_release_change_lock(hw);
5443 /* Update profile to recipe bitmap array */
5444 bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
5445 ICE_MAX_NUM_RECIPES);
5447 /* Update recipe to profile bitmap array */
5448 for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
5449 set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
5452 *rid = rm->root_rid;
5453 memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
5454 sizeof(*lkup_exts));
/* Cleanup: release the recipe-group entries and FV list entries that
 * were accumulated on rm's lists during recipe construction.
 */
5456 list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
5457 list_del(&r_entry->l_entry);
5458 devm_kfree(ice_hw_to_dev(hw), r_entry);
5461 list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
5462 list_del(&fvit->list_entry);
5463 devm_kfree(ice_hw_to_dev(hw), fvit);
5466 devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
5476 * ice_dummy_packet_add_vlan - insert VLAN header to dummy pkt
5478 * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
5479 * @num_vlan: number of VLAN tags
/* Returns a newly allocated profile (caller owns it; ICE_PKT_KMALLOC is
 * set so the caller knows to free it) or an ERR_PTR on failure.
 */
5481 static struct ice_dummy_pkt_profile *
5482 ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
5485 struct ice_dummy_pkt_profile *profile;
5486 struct ice_dummy_pkt_offsets *offsets;
5487 u32 buf_len, off, etype_off, i;
/* Only single-tagged (1) and QinQ (2) are supported. */
5490 if (num_vlan < 1 || num_vlan > 2)
5491 return ERR_PTR(-EINVAL);
/* Each tag shifts everything after the Ethernet header by VLAN_HLEN. */
5493 off = num_vlan * VLAN_HLEN;
5495 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
5496 dummy_pkt->offsets_len;
5497 offsets = kzalloc(buf_len, GFP_KERNEL);
5499 return ERR_PTR(-ENOMEM);
/* Keep the MAC entry, splice in the VLAN/QinQ offset entries, then copy
 * the remaining entries with their offsets shifted by "off".
 */
5501 offsets[0] = dummy_pkt->offsets[0];
5502 if (num_vlan == 2) {
5503 offsets[1] = ice_dummy_qinq_packet_offsets[0];
5504 offsets[2] = ice_dummy_qinq_packet_offsets[1];
5505 } else if (num_vlan == 1) {
5506 offsets[1] = ice_dummy_vlan_packet_offsets[0];
5509 for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5510 offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
5511 offsets[i + num_vlan].offset =
5512 dummy_pkt->offsets[i].offset + off;
5514 offsets[i + num_vlan] = dummy_pkt->offsets[i];
/* Insertion point in the packet bytes = where the EtherType sits. */
5516 etype_off = dummy_pkt->offsets[1].offset;
5518 buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
5520 pkt = kzalloc(buf_len, GFP_KERNEL);
5523 return ERR_PTR(-ENOMEM);
/* Rebuild the packet: bytes before EtherType, the VLAN/QinQ template,
 * then the rest of the original packet.
 */
5526 memcpy(pkt, dummy_pkt->pkt, etype_off);
5527 memcpy(pkt + etype_off,
5528 num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
5530 memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
5531 dummy_pkt->pkt_len - etype_off);
5533 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
5537 return ERR_PTR(-ENOMEM);
5540 profile->offsets = offsets;
5542 profile->pkt_len = buf_len;
5543 profile->match |= ICE_PKT_KMALLOC;
5549 * ice_find_dummy_packet - find dummy packet
5551 * @lkups: lookup elements or match criteria for the advanced recipe, one
5552 * structure per protocol header
5553 * @lkups_cnt: number of protocols
5554 * @tun_type: tunnel type
5556 * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
5558 static const struct ice_dummy_pkt_profile *
5559 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5560 enum ice_sw_tunnel_type tun_type)
5562 const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
5563 u32 match = 0, vlan_count = 0;
/* First translate the tunnel type into ICE_PKT_TUN_* match flags. */
5567 case ICE_SW_TUN_GTPC:
5568 match |= ICE_PKT_TUN_GTPC;
5570 case ICE_SW_TUN_GTPU:
5571 match |= ICE_PKT_TUN_GTPU;
5573 case ICE_SW_TUN_NVGRE:
5574 match |= ICE_PKT_TUN_NVGRE;
5576 case ICE_SW_TUN_GENEVE:
5577 case ICE_SW_TUN_VXLAN:
5578 match |= ICE_PKT_TUN_UDP;
/* Then fold each lookup element into additional ICE_PKT_* flags; VLAN
 * lookups only bump vlan_count, they do not affect profile selection.
 */
5584 for (i = 0; i < lkups_cnt; i++) {
5585 if (lkups[i].type == ICE_UDP_ILOS)
5586 match |= ICE_PKT_INNER_UDP;
5587 else if (lkups[i].type == ICE_TCP_IL)
5588 match |= ICE_PKT_INNER_TCP;
5589 else if (lkups[i].type == ICE_IPV6_OFOS)
5590 match |= ICE_PKT_OUTER_IPV6;
5591 else if (lkups[i].type == ICE_VLAN_OFOS ||
5592 lkups[i].type == ICE_VLAN_EX)
5594 else if (lkups[i].type == ICE_VLAN_IN)
/* An EtherType lookup fully masked to 0x86DD implies IPv6 at that
 * layer (outer for ICE_ETYPE_OL, inner for ICE_ETYPE_IL).
 */
5596 else if (lkups[i].type == ICE_ETYPE_OL &&
5597 lkups[i].h_u.ethertype.ethtype_id ==
5598 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5599 lkups[i].m_u.ethertype.ethtype_id ==
5600 cpu_to_be16(0xFFFF))
5601 match |= ICE_PKT_OUTER_IPV6;
5602 else if (lkups[i].type == ICE_ETYPE_IL &&
5603 lkups[i].h_u.ethertype.ethtype_id ==
5604 cpu_to_be16(ICE_IPV6_ETHER_ID) &&
5605 lkups[i].m_u.ethertype.ethtype_id ==
5606 cpu_to_be16(0xFFFF))
5607 match |= ICE_PKT_INNER_IPV6;
5608 else if (lkups[i].type == ICE_IPV6_IL)
5609 match |= ICE_PKT_INNER_IPV6;
5610 else if (lkups[i].type == ICE_GTP_NO_PAY)
5611 match |= ICE_PKT_GTP_NOPAY;
5612 else if (lkups[i].type == ICE_PPPOE) {
5613 match |= ICE_PKT_PPPOE;
5614 if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
5616 match |= ICE_PKT_OUTER_IPV6;
5617 } else if (lkups[i].type == ICE_L2TPV3)
5618 match |= ICE_PKT_L2TPV3;
/* Scan the profile table for the first entry whose match flags are a
 * subset of ours; the sentinel entry (match == 0) always matches.
 */
5621 while (ret->match && (match & ret->match) != ret->match)
/* VLAN tags are not part of the stored templates, so splice them in. */
5624 if (vlan_count != 0)
5625 ret = ice_dummy_packet_add_vlan(ret, vlan_count);
5631 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5633 * @lkups: lookup elements or match criteria for the advanced recipe, one
5634 * structure per protocol header
5635 * @lkups_cnt: number of protocols
5636 * @s_rule: stores rule information from the match criteria
5637 * @profile: dummy packet profile (the template, its size and header offsets)
5640 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5641 struct ice_sw_rule_lkup_rx_tx *s_rule,
5642 const struct ice_dummy_pkt_profile *profile)
5647 /* Start with a packet with a pre-defined/dummy content. Then, fill
5648 * in the header values to be looked up or matched.
5650 pkt = s_rule->hdr_data;
5652 memcpy(pkt, profile->pkt, profile->pkt_len);
5654 for (i = 0; i < lkups_cnt; i++) {
5655 const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
5656 enum ice_protocol_type type;
5657 u16 offset = 0, len = 0, j;
5660 /* find the start of this layer; it should be found since this
5661 * was already checked when search for the dummy packet
5663 type = lkups[i].type;
5664 /* metadata isn't present in the packet */
5665 if (type == ICE_HW_METADATA)
5668 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5669 if (type == offsets[j].type) {
5670 offset = offsets[j].offset;
5675 /* this should never happen in a correct calling sequence */
/* Header length for this protocol type, taken from the on-wire
 * struct definitions in the driver headers.
 */
5679 switch (lkups[i].type) {
5682 len = sizeof(struct ice_ether_hdr);
5686 len = sizeof(struct ice_ethtype_hdr);
5691 len = sizeof(struct ice_vlan_hdr);
5695 len = sizeof(struct ice_ipv4_hdr);
5699 len = sizeof(struct ice_ipv6_hdr);
5704 len = sizeof(struct ice_l4_hdr);
5707 len = sizeof(struct ice_sctp_hdr);
5710 len = sizeof(struct ice_nvgre_hdr);
5714 len = sizeof(struct ice_udp_tnl_hdr);
5716 case ICE_GTP_NO_PAY:
5718 len = sizeof(struct ice_udp_gtp_hdr);
5721 len = sizeof(struct ice_pppoe_hdr);
5724 len = sizeof(struct ice_l2tpv3_sess_hdr);
5730 /* the length should be a word multiple */
5731 if (len % ICE_BYTES_PER_WORD)
5734 /* We have the offset to the header start, the length, the
5735 * caller's header values and mask. Use this information to
5736 * copy the data into the dummy packet appropriately based on
5737 * the mask. Note that we need to only write the bits as
5738 * indicated by the mask to make sure we don't improperly write
5739 * over any significant packet data.
5741 for (j = 0; j < len / sizeof(u16); j++) {
5742 u16 *ptr = (u16 *)(pkt + offset);
5743 u16 mask = lkups[i].m_raw[j];
/* Masked merge: keep template bits where mask is 0, take the
 * caller's header bits where mask is 1.
 */
5748 ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
5752 s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
5758 * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
5759 * @hw: pointer to the hardware structure
5760 * @tun_type: tunnel type
5761 * @pkt: dummy packet to fill in
5762 * @offsets: offset info for the dummy packet
5765 ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
5766 u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
/* Only VXLAN/GENEVE need a port patched in, and only when a matching
 * tunnel port is currently open on the device.
 */
5771 case ICE_SW_TUN_VXLAN:
5772 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
5775 case ICE_SW_TUN_GENEVE:
5776 if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
5780 /* Nothing needs to be done for this tunnel type */
5784 /* Find the outer UDP protocol header and insert the port number */
5785 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5786 if (offsets[i].type == ICE_UDP_OF) {
5787 struct ice_l4_hdr *hdr;
5790 offset = offsets[i].offset;
5791 hdr = (struct ice_l4_hdr *)&pkt[offset];
/* Tunnel port goes into the UDP destination port, big-endian. */
5792 hdr->dst_port = cpu_to_be16(open_port);
5802 * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
5803 * @hw: pointer to hw structure
5804 * @vlan_type: VLAN tag type
5805 * @pkt: dummy packet to fill in
5806 * @offsets: offset info for the dummy packet
5809 ice_fill_adv_packet_vlan(struct ice_hw *hw, u16 vlan_type, u8 *pkt,
5810 const struct ice_dummy_pkt_offsets *offsets)
5814 /* Check if there is something to do */
/* Skip when no VLAN type was requested or double VLAN mode is off. */
5815 if (!vlan_type || !ice_is_dvm_ena(hw))
5818 /* Find VLAN header and insert VLAN TPID */
5819 for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
5820 if (offsets[i].type == ICE_VLAN_OFOS ||
5821 offsets[i].type == ICE_VLAN_EX) {
5822 struct ice_vlan_hdr *hdr;
5825 offset = offsets[i].offset;
5826 hdr = (struct ice_vlan_hdr *)&pkt[offset];
/* TPID is written big-endian into the VLAN header's type field. */
5827 hdr->type = cpu_to_be16(vlan_type);
/* Two advanced rules are considered equal when their switch-action flag,
 * tunnel type, VLAN type and source VSI all match; lookup contents are
 * compared separately by the caller.
 */
5836 static bool ice_rules_equal(const struct ice_adv_rule_info *first,
5837 const struct ice_adv_rule_info *second)
5839 return first->sw_act.flag == second->sw_act.flag &&
5840 first->tun_type == second->tun_type &&
5841 first->vlan_type == second->vlan_type &&
5842 first->src_vsi == second->src_vsi;
5846 * ice_find_adv_rule_entry - Search a rule entry
5847 * @hw: pointer to the hardware structure
5848 * @lkups: lookup elements or match criteria for the advanced recipe, one
5849 * structure per protocol header
5850 * @lkups_cnt: number of protocols
5851 * @recp_id: recipe ID for which we are finding the rule
5852 * @rinfo: other information regarding the rule e.g. priority and action info
5854 * Helper function to search for a given advance rule entry
5855 * Returns pointer to entry storing the rule if found
5857 static struct ice_adv_fltr_mgmt_list_entry *
5858 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5859 u16 lkups_cnt, u16 recp_id,
5860 struct ice_adv_rule_info *rinfo)
5862 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5863 struct ice_switch_info *sw = hw->switch_info;
/* Scan the recipe's filter-rule list; an entry matches when its lookup
 * count and every lookup element (memcmp) match, and ice_rules_equal()
 * accepts the rule-info fields.
 */
5866 list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
5868 bool lkups_matched = true;
5870 if (lkups_cnt != list_itr->lkups_cnt)
5872 for (i = 0; i < list_itr->lkups_cnt; i++)
5873 if (memcmp(&list_itr->lkups[i], &lkups[i],
5875 lkups_matched = false;
5878 if (ice_rules_equal(rinfo, &list_itr->rule_info) &&
5886 * ice_adv_add_update_vsi_list
5887 * @hw: pointer to the hardware structure
5888 * @m_entry: pointer to current adv filter management list entry
5889 * @cur_fltr: filter information from the book keeping entry
5890 * @new_fltr: filter information with the new VSI to be added
5892 * Call AQ command to add or update previously created VSI list with new VSI.
5894 * Helper function to do book keeping associated with adding filter information
5895 * The algorithm to do the booking keeping is described below :
5896 * When a VSI needs to subscribe to a given advanced filter
5897 * if only one VSI has been added till now
5898 * Allocate a new VSI list and add two VSIs
5899 * to this list using switch rule command
5900 * Update the previously created switch rule with the
5901 * newly created VSI list ID
5902 * if a VSI list was previously created
5903 * Add the new VSI to the previously created VSI list set
5904 * using the update switch rule command
5907 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5908 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5909 struct ice_adv_rule_info *cur_fltr,
5910 struct ice_adv_rule_info *new_fltr)
5912 u16 vsi_list_id = 0;
/* VSI lists only make sense for forward-to-VSI actions; queue, queue
 * group and drop actions cannot be converted to a VSI list.
 */
5915 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5916 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
5917 cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
/* Mixing a queue-directed new filter with an existing VSI-directed one
 * is likewise rejected.
 */
5920 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5921 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5922 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5923 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5926 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5927 /* Only one entry existed in the mapping and it was not already
5928 * a part of a VSI list. So, create a VSI list with the old and
5931 struct ice_fltr_info tmp_fltr;
5932 u16 vsi_handle_arr[2];
5934 /* A rule already exists with the new VSI being added */
5935 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5936 new_fltr->sw_act.fwd_id.hw_vsi_id)
5939 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5940 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5941 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5947 memset(&tmp_fltr, 0, sizeof(tmp_fltr));
5948 tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
5949 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5950 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5951 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5952 tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
5954 /* Update the previous switch rule of "forward to VSI" to
5957 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keep the conversion: the filter now forwards to the new
 * VSI list, and the map entry records both member handles.
 */
5961 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5962 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5963 m_entry->vsi_list_info =
5964 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5967 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5969 if (!m_entry->vsi_list_info)
5972 /* A rule already exists with the new VSI being added */
5973 if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
5976 /* Update the previously created VSI list set with
5977 * the new VSI ID passed in
5979 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5981 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5983 ice_aqc_opc_update_sw_rules,
5985 /* update VSI list mapping info with new VSI ID */
5987 set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
5990 m_entry->vsi_count++;
/**
 * ice_rule_add_tunnel_metadata - fill a lookup element with tunnel metadata
 * @lkup: lookup element to populate
 *
 * Marks @lkup as a HW-metadata lookup and sets the tunnel mask in the
 * packet-flags word (big-endian, as the hardware expects).
 */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
		cpu_to_be16(ICE_PKT_TUNNEL_MASK);
/**
 * ice_rule_add_vlan_metadata - fill a lookup element with VLAN metadata
 * @lkup: lookup element to populate
 *
 * Marks @lkup as a HW-metadata lookup and sets the VLAN mask in the
 * packet-flags word (big-endian, as the hardware expects).
 */
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
		cpu_to_be16(ICE_PKT_VLAN_MASK);
/**
 * ice_rule_add_src_vsi_metadata - fill a lookup element with source-VSI metadata
 * @lkup: lookup element to populate
 *
 * Marks @lkup as a HW-metadata lookup and sets the source-VSI MDID mask so
 * the rule can match on the originating VSI.
 */
void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup)
	lkup->type = ICE_HW_METADATA;
	lkup->m_u.metadata.source_vsi = cpu_to_be16(ICE_MDID_SOURCE_VSI_MASK);
6015 * ice_add_adv_rule - helper function to create an advanced switch rule
6016 * @hw: pointer to the hardware structure
6017 * @lkups: information on the words that needs to be looked up. All words
6018 * together makes one recipe
6019 * @lkups_cnt: num of entries in the lkups array
6020 * @rinfo: other information related to the rule that needs to be programmed
6021 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
 * ignored in case of error.
6024 * This function can program only 1 rule at a time. The lkups is used to
 * describe all the words that form the "lookup" portion of the recipe.
6026 * These words can span multiple protocols. Callers to this function need to
6027 * pass in a list of protocol headers with lookup information along and mask
6028 * that determines which words are valid from the given protocol header.
6029 * rinfo describes other information related to this rule such as forwarding
6030 * IDs, priority of this rule, etc.
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
		 struct ice_rule_query_data *added_entry)
	struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
	struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
	const struct ice_dummy_pkt_profile *profile;
	u16 rid = 0, i, rule_buf_sz, vsi_handle;
	struct list_head *rule_head;
	struct ice_switch_info *sw;

	/* Initialize profile to result index bitmap */
	if (!hw->switch_info->prof_res_bm_init) {
		hw->switch_info->prof_res_bm_init = 1;
		ice_init_prof_result_bm(hw);

	/* get # of words we need to match */
	for (i = 0; i < lkups_cnt; i++) {
		for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
			if (lkups[i].m_raw[j])

	/* a recipe can chain only a limited number of match words */
	if (word_cnt > ICE_MAX_CHAIN_WORDS)

	/* locate a dummy packet */
	profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
	if (IS_ERR(profile))
		return PTR_ERR(profile);

	/* only VSI/queue/queue-group forwarding and drop are supported */
	if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
	      rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
	      rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
		goto free_pkt_profile;

	vsi_handle = rinfo->sw_act.vsi_handle;
	if (!ice_is_vsi_valid(hw, vsi_handle)) {
		goto free_pkt_profile;

	/* translate the driver VSI handle into the HW VSI number */
	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		rinfo->sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, vsi_handle);

		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, rinfo->src_vsi);
		rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);

	/* create (or look up) the recipe that matches these lookup words */
	status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
		goto free_pkt_profile;
	m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* we have to add VSI to VSI_LIST and increment vsi_count.
	 * Also Update VSI list so that we can change forwarding rule
	 * if the rule already exists, we will check if it exists with
	 * same vsi_id, if not then add it to the VSI list if it already
	 * exists if not then create a VSI list and add the existing VSI
	 * ID and the new VSI ID to the list
	 * We will add that VSI to the list
	 */
	status = ice_adv_add_update_vsi_list(hw, m_entry,
					     &m_entry->rule_info,
	added_entry->rid = rid;
	added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
	added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
	goto free_pkt_profile;

	/* no matching rule yet: build a new switch rule buffer sized for the
	 * dummy packet header
	 */
	rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
	s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
	goto free_pkt_profile;

	/* default both LAN and loopback enable unless the caller provided
	 * explicit action flags
	 */
	if (!rinfo->flags_info.act_valid) {
		act |= ICE_SINGLE_ACT_LAN_ENABLE;
		act |= ICE_SINGLE_ACT_LB_ENABLE;
		act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
						ICE_SINGLE_ACT_LB_ENABLE);

	/* encode the forwarding action into the single-action word */
	switch (rinfo->sw_act.fltr_act) {
	case ICE_FWD_TO_VSI:
		act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
			ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
	case ICE_FWD_TO_QGRP:
		/* q_rgn is log2 of the group size (group sizes are powers
		 * of two); 0 when no size was given
		 */
		q_rgn = rinfo->sw_act.qgrp_size > 0 ?
			(u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
		act |= ICE_SINGLE_ACT_TO_Q;
		act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
			ICE_SINGLE_ACT_Q_INDEX_M;
		act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
			ICE_SINGLE_ACT_Q_REGION_M;
	case ICE_DROP_PACKET:
		act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
			ICE_SINGLE_ACT_VALID_BIT;
		goto err_ice_add_adv_rule;

	/* If there is no matching criteria for direction there
	 * is only one difference between Rx and Tx:
	 * - get switch id base on VSI number from source field (Tx)
	 * - get switch id base on port number (Rx)
	 *
	 * If matching on direction metadata is chose rule direction is
	 * extracted from type value set here.
	 */
	if (rinfo->sw_act.flag & ICE_FLTR_TX) {
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		s_rule->src = cpu_to_le16(rinfo->sw_act.src);
		s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		s_rule->src = cpu_to_le16(hw->port_info->lport);

	s_rule->recipe_id = cpu_to_le16(rid);
	s_rule->act = cpu_to_le32(act);

	/* copy the dummy packet in and overlay the caller's match values */
	status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
		goto err_ice_add_adv_rule;

	status = ice_fill_adv_packet_tun(hw, rinfo->tun_type, s_rule->hdr_data,
		goto err_ice_add_adv_rule;

	status = ice_fill_adv_packet_vlan(hw, rinfo->vlan_type,
		goto err_ice_add_adv_rule;

	/* program the rule via the admin queue */
	status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
				 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
		goto err_ice_add_adv_rule;
	adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
				sizeof(struct ice_adv_fltr_mgmt_list_entry),
		goto err_ice_add_adv_rule;

	/* keep a private copy of the lookup words for later find/remove */
	adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
				       lkups_cnt * sizeof(*lkups), GFP_KERNEL);
	if (!adv_fltr->lkups) {
		goto err_ice_add_adv_rule;

	adv_fltr->lkups_cnt = lkups_cnt;
	adv_fltr->rule_info = *rinfo;
	/* rule ID is assigned by FW and echoed back in s_rule->index */
	adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
	sw = hw->switch_info;
	sw->recp_list[rid].adv_rule = true;
	rule_head = &sw->recp_list[rid].filt_rules;

	if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
		adv_fltr->vsi_count = 1;

	/* Add rule entry to book keeping list */
	list_add(&adv_fltr->list_entry, rule_head);

	added_entry->rid = rid;
	added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
	added_entry->vsi_handle = rinfo->sw_act.vsi_handle;

err_ice_add_adv_rule:
	/* on failure release the partially built management entry */
	if (status && adv_fltr) {
		devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
		devm_kfree(ice_hw_to_dev(hw), adv_fltr);

	/* dummy-packet profiles flagged ICE_PKT_KMALLOC were heap-built and
	 * are owned by us
	 */
	if (profile->match & ICE_PKT_KMALLOC) {
		kfree(profile->offsets);
		kfree(profile->pkt);
6257 * ice_replay_vsi_fltr - Replay filters for requested VSI
6258 * @hw: pointer to the hardware structure
6259 * @vsi_handle: driver VSI handle
6260 * @recp_id: Recipe ID for which rules need to be replayed
6261 * @list_head: list for which filters need to be replayed
6263 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6264 * It is required to pass valid VSI handle.
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
		    struct list_head *list_head)
	struct ice_fltr_mgmt_list_entry *itr;

	if (list_empty(list_head))
	hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	list_for_each_entry(itr, list_head, list_entry) {
		struct ice_fltr_list_entry f_entry;

		/* work on a copy so the stored entry is untouched on error */
		f_entry.fltr_info = itr->fltr_info;
		if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
		    itr->fltr_info.vsi_handle == vsi_handle) {
			/* update the src in case it is VSI num */
			if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
				f_entry.fltr_info.src = hw_vsi_id;
			status = ice_add_rule_internal(hw, recp_id, &f_entry);

		/* VSI-list case: only replay if this VSI is a member */
		if (!itr->vsi_list_info ||
		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
		/* Clearing it so that the logic can add it back */
		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
		f_entry.fltr_info.vsi_handle = vsi_handle;
		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
		/* update the src in case it is VSI num */
		if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
			f_entry.fltr_info.src = hw_vsi_id;
		if (recp_id == ICE_SW_LKUP_VLAN)
			status = ice_add_vlan_internal(hw, &f_entry);
			status = ice_add_rule_internal(hw, recp_id, &f_entry);
6314 * ice_adv_rem_update_vsi_list
6315 * @hw: pointer to the hardware structure
6316 * @vsi_handle: VSI handle of the VSI to remove
6317 * @fm_list: filter management entry for which the VSI list management needs to
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			    struct ice_adv_fltr_mgmt_list_entry *fm_list)
	struct ice_vsi_list_map_info *vsi_list_info;
	enum ice_sw_lkup_type lkup_type;

	/* only rules that forward to a VSI list have list state to update */
	if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)

	/* A rule with the VSI being removed does not exist */
	if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))

	lkup_type = ICE_SW_LKUP_LAST;
	vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
	/* drop this VSI from the HW VSI list (remove flag = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,

	/* mirror the removal in the book keeping */
	fm_list->vsi_count--;
	clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
	vsi_list_info = fm_list->vsi_list_info;
	if (fm_list->vsi_count == 1) {
		/* one VSI left: collapse the list back to a direct
		 * forward-to-VSI rule
		 */
		struct ice_fltr_info tmp_fltr;

		rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  ice_aqc_opc_update_sw_rules,

		memset(&tmp_fltr, 0, sizeof(tmp_fltr));
		tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
		tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
		fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;

		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
			ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr.fwd_id.hw_vsi_id, status);

		fm_list->vsi_list_info->ref_cnt--;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
			ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);

		/* free the book keeping map for the removed list */
		list_del(&vsi_list_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
		fm_list->vsi_list_info = NULL;
6404 * ice_rem_adv_rule - removes existing advanced switch rule
6405 * @hw: pointer to the hardware structure
6406 * @lkups: information on the words that needs to be looked up. All words
6407 * together makes one recipe
6408 * @lkups_cnt: num of entries in the lkups array
 * @rinfo: pointer to the rule information for the rule
6411 * This function can be used to remove 1 rule at a time. The lkups is
 * used to describe all the words that form the "lookup" portion of the
6413 * rule. These words can span multiple protocols. Callers to this function
6414 * need to pass in a list of protocol headers with lookup information along
6415 * and mask that determines which words are valid from the given protocol
6416 * header. rinfo describes other information related to this rule such as
6417 * forwarding IDs, priority of this rule, etc.
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	bool remove_rule = false;
	struct mutex *rule_lock; /* Lock to protect filter rule list */
	u16 i, rid, vsi_handle;

	/* rebuild the word extraction sequence so the recipe can be found */
	memset(&lkup_exts, 0, sizeof(lkup_exts));
	for (i = 0; i < lkups_cnt; i++) {

		if (lkups[i].type >= ICE_PROTOCOL_LAST)

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);

	rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	mutex_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
	} else if (list_elem->vsi_count > 1) {
		/* other VSIs still subscribe: just drop this VSI from
		 * the list, keep the rule
		 */
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		mutex_unlock(rule_lock);

		if (list_elem->vsi_count == 0)

	mutex_unlock(rule_lock);
		struct ice_sw_rule_lkup_rx_tx *s_rule;

		/* no packet header needed for a remove request */
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
		s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
		s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
		s_rule->hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 ice_aqc_opc_remove_sw_rules, NULL);
		/* -ENOENT means FW no longer has the rule; still clean up */
		if (!status || status == -ENOENT) {
			struct ice_switch_info *sw = hw->switch_info;

			mutex_lock(rule_lock);
			list_del(&list_elem->list_entry);
			devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
			devm_kfree(ice_hw_to_dev(hw), list_elem);
			mutex_unlock(rule_lock);
			if (list_empty(&sw->recp_list[rid].filt_rules))
				sw->recp_list[rid].adv_rule = false;
6501 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
6502 * @hw: pointer to the hardware structure
6503 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
6505 * This function is used to remove 1 rule at a time. The removal is based on
6506 * the remove_entry parameter. This function will remove rule for a given
6507 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
ice_rem_adv_rule_by_id(struct ice_hw *hw,
		       struct ice_rule_query_data *remove_entry)
	struct ice_adv_fltr_mgmt_list_entry *list_itr;
	struct list_head *list_head;
	struct ice_adv_rule_info rinfo;
	struct ice_switch_info *sw;

	sw = hw->switch_info;
	if (!sw->recp_list[remove_entry->rid].recp_created)
	list_head = &sw->recp_list[remove_entry->rid].filt_rules;
	/* scan the recipe's rule list for a matching rule ID */
	list_for_each_entry(list_itr, list_head, list_entry) {
		if (list_itr->rule_info.fltr_rule_id ==
		    remove_entry->rule_id) {
			/* copy rule info so the caller's VSI handle can be
			 * substituted without touching the stored entry
			 */
			rinfo = list_itr->rule_info;
			rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
			return ice_rem_adv_rule(hw, list_itr->lkups,
						list_itr->lkups_cnt, &rinfo);
	/* either list is empty or unable to find rule */
6536 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
6538 * @hw: pointer to the hardware structure
6539 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
6541 * This function is used to remove all the rules for a given VSI and as soon
6542 * as removing a rule fails, it will return immediately with the error code,
6543 * else it will return success.
int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
	struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
	struct ice_vsi_list_map_info *map_info;
	struct ice_adv_rule_info rinfo;
	struct list_head *list_head;
	struct ice_switch_info *sw;

	sw = hw->switch_info;
	/* walk every recipe that carries advanced rules */
	for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
		if (!sw->recp_list[rid].recp_created)
		if (!sw->recp_list[rid].adv_rule)

		list_head = &sw->recp_list[rid].filt_rules;
		/* _safe iteration: ice_rem_adv_rule() may delete entries */
		list_for_each_entry_safe(list_itr, tmp_entry, list_head,
			rinfo = list_itr->rule_info;

			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
				map_info = list_itr->vsi_list_info;

				/* skip lists this VSI is not a member of */
				if (!test_bit(vsi_handle, map_info->vsi_map))
			} else if (rinfo.sw_act.vsi_handle != vsi_handle) {

			rinfo.sw_act.vsi_handle = vsi_handle;
			status = ice_rem_adv_rule(hw, list_itr->lkups,
						  list_itr->lkups_cnt, &rinfo);
6589 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6590 * @hw: pointer to the hardware structure
6591 * @vsi_handle: driver VSI handle
6592 * @list_head: list for which filters need to be replayed
6594 * Replay the advanced rule for the given VSI.
ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
			struct list_head *list_head)
	struct ice_rule_query_data added_entry = { 0 };
	struct ice_adv_fltr_mgmt_list_entry *adv_fltr;

	if (list_empty(list_head))
	/* re-add every stored advanced rule that targets this VSI */
	list_for_each_entry(adv_fltr, list_head, list_entry) {
		struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
		u16 lk_cnt = adv_fltr->lkups_cnt;

		if (vsi_handle != rinfo->sw_act.vsi_handle)
		status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6621 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6622 * @hw: pointer to the hardware structure
6623 * @vsi_handle: driver VSI handle
6625 * Replays filters for requested VSI via vsi_handle.
int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
	struct ice_switch_info *sw = hw->switch_info;

	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct list_head *head;

		head = &sw->recp_list[i].filt_replay_rules;
		/* regular recipes and advanced-rule recipes replay via
		 * different paths
		 */
		if (!sw->recp_list[i].adv_rule)
			status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
			status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6648 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6649 * @hw: pointer to the HW struct
6651 * Deletes the filter replay rules.
6653 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6655 struct ice_switch_info *sw = hw->switch_info;
6661 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6662 if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
6663 struct list_head *l_head;
6665 l_head = &sw->recp_list[i].filt_replay_rules;
6666 if (!sw->recp_list[i].adv_rule)
6667 ice_rem_sw_rule_info(hw, l_head);
6669 ice_rem_adv_rule_info(hw, l_head);