// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_ib.h"
#include "counters.h"
#include "devx.h"
#include "fs.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

enum {
        MATCH_CRITERIA_ENABLE_OUTER_BIT,
        MATCH_CRITERIA_ENABLE_MISC_BIT,
        MATCH_CRITERIA_ENABLE_INNER_BIT,
        MATCH_CRITERIA_ENABLE_MISC2_BIT
};

#define HEADER_IS_ZERO(match_criteria, headers)                            \
        !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
                    0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))       \

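/*
 * Build the match_criteria_enable bitmask for a flow rule: set one bit
 * per header group (outer, misc, inner, misc2) whose section of the
 * match criteria contains any non-zero byte.
 */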
static u8 get_match_criteria_enable(u32 *match_criteria)
{
        u8 match_criteria_enable;

        match_criteria_enable =
                (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
                MATCH_CRITERIA_ENABLE_OUTER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
                MATCH_CRITERIA_ENABLE_MISC_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
                MATCH_CRITERIA_ENABLE_INNER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
                MATCH_CRITERIA_ENABLE_MISC2_BIT;

        return match_criteria_enable;
}

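/*
 * Set the ip_protocol field in both the match criteria and match value.
 * If a protocol is already being matched, only an identical mask/value
 * pair is accepted; a conflicting protocol yields -EINVAL.
 */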
static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
        u8 entry_mask;
        u8 entry_val;
        int err = 0;

        if (!mask)
                goto out;

        entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
                              ip_protocol);
        entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
                             ip_protocol);
        if (!entry_mask) {
                MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
                MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
                goto out;
        }
        /* Don't override existing ip protocol */
        if (mask != entry_mask || val != entry_val)
                err = -EINVAL;
out:
        return err;
}

static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
                           bool inner)
{
        if (inner) {
                MLX5_SET(fte_match_set_misc,
                         misc_c, inner_ipv6_flow_label, mask);
                MLX5_SET(fte_match_set_misc,
                         misc_v, inner_ipv6_flow_label, val);
        } else {
                MLX5_SET(fte_match_set_misc,
                         misc_c, outer_ipv6_flow_label, mask);
                MLX5_SET(fte_match_set_misc,
                         misc_v, outer_ipv6_flow_label, val);
        }
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
        MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
        MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
        MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

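/*
 * Validate that every MPLS field requested in set_mask (label, exp,
 * s_bos, ttl) is matchable by the device, as advertised by the
 * corresponding MLX5_FIELD_SUPPORT_MPLS_* capability bit.
 */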
static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
{
        if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
            !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
                return -EOPNOTSUPP;

        if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
            !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
                return -EOPNOTSUPP;

        if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
            !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
                return -EOPNOTSUPP;

        if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
            !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
                return -EOPNOTSUPP;

        return 0;
}

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size
#define LAST_COUNTERS_FIELD counters

/*
 * 'field' is the last field of 'filter' that the driver supports; the
 * macro evaluates to true if any byte beyond it is set in the filter.
 */
#define FIELDS_NOT_SUPPORTED(filter, field)\
        memchr_inv((void *)&filter.field  +\
                   sizeof(filter.field), 0,\
                   sizeof(filter) -\
                   offsetof(typeof(filter), field) -\
                   sizeof(filter.field))

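/*
 * Translate an ib_flow_action attached to a flow spec into mlx5
 * flow_act bits (ESP crypto, modify header, decap, packet reformat).
 * Each action type may appear at most once per rule.
 */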
int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
                           bool is_egress,
                           struct mlx5_flow_act *action)
{
        switch (maction->ib_action.type) {
        case IB_FLOW_ACTION_ESP:
                if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
                                      MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
                        return -EINVAL;
                /* Currently only AES_GCM keymat is supported by the driver */
                action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
                action->action |= is_egress ?
                        MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
                        MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
                return 0;
        case IB_FLOW_ACTION_UNSPECIFIED:
                if (maction->flow_action_raw.sub_type ==
                    MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
                        if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                                return -EINVAL;
                        action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
                        action->modify_hdr =
                                maction->flow_action_raw.modify_hdr;
                        return 0;
                }
                if (maction->flow_action_raw.sub_type ==
                    MLX5_IB_FLOW_ACTION_DECAP) {
                        if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                                return -EINVAL;
                        action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
                        return 0;
                }
                if (maction->flow_action_raw.sub_type ==
                    MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
                        if (action->action &
                            MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
                                return -EINVAL;
                        action->action |=
                                MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
                        action->pkt_reformat =
                                maction->flow_action_raw.pkt_reformat;
                        return 0;
                }
                fallthrough;
        default:
                return -EOPNOTSUPP;
        }
}

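/*
 * Translate a single ib_flow_spec into the mlx5 match criteria/value
 * blocks of @spec and, for action specs, into @action.  @prev_type is
 * the previous spec's type, used to place MPLS labels relative to the
 * encapsulating UDP or GRE header.
 */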
static int parse_flow_attr(struct mlx5_core_dev *mdev,
                           struct mlx5_flow_spec *spec,
                           const union ib_flow_spec *ib_spec,
                           const struct ib_flow_attr *flow_attr,
                           struct mlx5_flow_act *action, u32 prev_type)
{
        struct mlx5_flow_context *flow_context = &spec->flow_context;
        u32 *match_c = spec->match_criteria;
        u32 *match_v = spec->match_value;
        void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
                                           misc_parameters);
        void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
                                           misc_parameters);
        void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
                                            misc_parameters_2);
        void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
                                            misc_parameters_2);
        void *headers_c;
        void *headers_v;
        int match_ipv;
        int ret;

        if (ib_spec->type & IB_FLOW_SPEC_INNER) {
                headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
                                         inner_headers);
                match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                        ft_field_support.inner_ip_version);
        } else {
                headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
                                         outer_headers);
                match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                        ft_field_support.outer_ip_version);
        }

        switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
        case IB_FLOW_SPEC_ETH:
                if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
                        return -EOPNOTSUPP;

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                ib_spec->eth.mask.dst_mac);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                ib_spec->eth.val.dst_mac);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                ib_spec->eth.mask.src_mac);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                ib_spec->eth.val.src_mac);

                if (ib_spec->eth.mask.vlan_tag) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 cvlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 cvlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 first_cfi,
                                 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 first_cfi,
                                 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 first_prio,
                                 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 first_prio,
                                 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
                }
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         ethertype, ntohs(ib_spec->eth.mask.ether_type));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         ethertype, ntohs(ib_spec->eth.val.ether_type));
                break;
        case IB_FLOW_SPEC_IPV4:
                if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
                        return -EOPNOTSUPP;

                if (match_ipv) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 ip_version, 0xf);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 ip_version, MLX5_FS_IPV4_VERSION);
                } else {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 ethertype, 0xffff);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 ethertype, ETH_P_IP);
                }

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &ib_spec->ipv4.mask.src_ip,
                       sizeof(ib_spec->ipv4.mask.src_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &ib_spec->ipv4.val.src_ip,
                       sizeof(ib_spec->ipv4.val.src_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &ib_spec->ipv4.mask.dst_ip,
                       sizeof(ib_spec->ipv4.mask.dst_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &ib_spec->ipv4.val.dst_ip,
                       sizeof(ib_spec->ipv4.val.dst_ip));

                set_tos(headers_c, headers_v,
                        ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

                if (set_proto(headers_c, headers_v,
                              ib_spec->ipv4.mask.proto,
                              ib_spec->ipv4.val.proto))
                        return -EINVAL;
                break;
        case IB_FLOW_SPEC_IPV6:
                if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
                        return -EOPNOTSUPP;

                if (match_ipv) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 ip_version, 0xf);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 ip_version, MLX5_FS_IPV6_VERSION);
                } else {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 ethertype, 0xffff);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 ethertype, ETH_P_IPV6);
                }

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &ib_spec->ipv6.mask.src_ip,
                       sizeof(ib_spec->ipv6.mask.src_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &ib_spec->ipv6.val.src_ip,
                       sizeof(ib_spec->ipv6.val.src_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &ib_spec->ipv6.mask.dst_ip,
                       sizeof(ib_spec->ipv6.mask.dst_ip));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &ib_spec->ipv6.val.dst_ip,
                       sizeof(ib_spec->ipv6.val.dst_ip));

                set_tos(headers_c, headers_v,
                        ib_spec->ipv6.mask.traffic_class,
                        ib_spec->ipv6.val.traffic_class);

                if (set_proto(headers_c, headers_v,
                              ib_spec->ipv6.mask.next_hdr,
                              ib_spec->ipv6.val.next_hdr))
                        return -EINVAL;

                set_flow_label(misc_params_c, misc_params_v,
                               ntohl(ib_spec->ipv6.mask.flow_label),
                               ntohl(ib_spec->ipv6.val.flow_label),
                               ib_spec->type & IB_FLOW_SPEC_INNER);
                break;
        case IB_FLOW_SPEC_ESP:
                if (ib_spec->esp.mask.seq)
                        return -EOPNOTSUPP;

                MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
                         ntohl(ib_spec->esp.mask.spi));
                MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
                         ntohl(ib_spec->esp.val.spi));
                break;
        case IB_FLOW_SPEC_TCP:
                if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
                                         LAST_TCP_UDP_FIELD))
                        return -EOPNOTSUPP;

                if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
                        return -EINVAL;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
                         ntohs(ib_spec->tcp_udp.mask.src_port));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
                         ntohs(ib_spec->tcp_udp.val.src_port));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
                         ntohs(ib_spec->tcp_udp.mask.dst_port));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
                         ntohs(ib_spec->tcp_udp.val.dst_port));
                break;
        case IB_FLOW_SPEC_UDP:
                if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
                                         LAST_TCP_UDP_FIELD))
                        return -EOPNOTSUPP;

                if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
                        return -EINVAL;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
                         ntohs(ib_spec->tcp_udp.mask.src_port));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                         ntohs(ib_spec->tcp_udp.val.src_port));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
                         ntohs(ib_spec->tcp_udp.mask.dst_port));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                         ntohs(ib_spec->tcp_udp.val.dst_port));
                break;
        case IB_FLOW_SPEC_GRE:
                if (ib_spec->gre.mask.c_ks_res0_ver)
                        return -EOPNOTSUPP;

                if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
                        return -EINVAL;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         0xff);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         IPPROTO_GRE);

                MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
                         ntohs(ib_spec->gre.mask.protocol));
                MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
                         ntohs(ib_spec->gre.val.protocol));

                memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
                                    gre_key.nvgre.hi),
                       &ib_spec->gre.mask.key,
                       sizeof(ib_spec->gre.mask.key));
                memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
                                    gre_key.nvgre.hi),
                       &ib_spec->gre.val.key,
                       sizeof(ib_spec->gre.val.key));
                break;
        case IB_FLOW_SPEC_MPLS:
                switch (prev_type) {
                case IB_FLOW_SPEC_UDP:
                        if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                                   ft_field_support.outer_first_mpls_over_udp),
                                                   &ib_spec->mpls.mask.tag))
                                return -EOPNOTSUPP;

                        memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
                                            outer_first_mpls_over_udp),
                               &ib_spec->mpls.val.tag,
                               sizeof(ib_spec->mpls.val.tag));
                        memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
                                            outer_first_mpls_over_udp),
                               &ib_spec->mpls.mask.tag,
                               sizeof(ib_spec->mpls.mask.tag));
                        break;
                case IB_FLOW_SPEC_GRE:
                        if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                                   ft_field_support.outer_first_mpls_over_gre),
                                                   &ib_spec->mpls.mask.tag))
                                return -EOPNOTSUPP;

                        memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
                                            outer_first_mpls_over_gre),
                               &ib_spec->mpls.val.tag,
                               sizeof(ib_spec->mpls.val.tag));
                        memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
                                            outer_first_mpls_over_gre),
                               &ib_spec->mpls.mask.tag,
                               sizeof(ib_spec->mpls.mask.tag));
                        break;
                default:
                        if (ib_spec->type & IB_FLOW_SPEC_INNER) {
                                if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                                           ft_field_support.inner_first_mpls),
                                                           &ib_spec->mpls.mask.tag))
                                        return -EOPNOTSUPP;

                                memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
                                                    inner_first_mpls),
                                       &ib_spec->mpls.val.tag,
                                       sizeof(ib_spec->mpls.val.tag));
                                memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
                                                    inner_first_mpls),
                                       &ib_spec->mpls.mask.tag,
                                       sizeof(ib_spec->mpls.mask.tag));
                        } else {
                                if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                                           ft_field_support.outer_first_mpls),
                                                           &ib_spec->mpls.mask.tag))
                                        return -EOPNOTSUPP;

                                memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
                                                    outer_first_mpls),
                                       &ib_spec->mpls.val.tag,
                                       sizeof(ib_spec->mpls.val.tag));
                                memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
                                                    outer_first_mpls),
                                       &ib_spec->mpls.mask.tag,
                                       sizeof(ib_spec->mpls.mask.tag));
                        }
                }
                break;
        case IB_FLOW_SPEC_VXLAN_TUNNEL:
                if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
                                         LAST_TUNNEL_FIELD))
                        return -EOPNOTSUPP;

                MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
                         ntohl(ib_spec->tunnel.mask.tunnel_id));
                MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
                         ntohl(ib_spec->tunnel.val.tunnel_id));
                break;
        case IB_FLOW_SPEC_ACTION_TAG:
                if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
                                         LAST_FLOW_TAG_FIELD))
                        return -EOPNOTSUPP;
                if (ib_spec->flow_tag.tag_id >= BIT(24))
                        return -EINVAL;

                flow_context->flow_tag = ib_spec->flow_tag.tag_id;
                flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
                break;
        case IB_FLOW_SPEC_ACTION_DROP:
                if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
                                         LAST_DROP_FIELD))
                        return -EOPNOTSUPP;
                action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                break;
        case IB_FLOW_SPEC_ACTION_HANDLE:
                ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
                        flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
                if (ret)
                        return ret;
                break;
        case IB_FLOW_SPEC_ACTION_COUNT:
                if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
                                         LAST_COUNTERS_FIELD))
                        return -EOPNOTSUPP;

                /* for now support only one counters spec per flow */
                if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
                        return -EINVAL;

                action->counters = ib_spec->flow_count.counters;
                action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* A flow that could match both multicast and unicast packets must not
 * be placed in the multicast flow steering table, where it could steal
 * other multicast packets.
 */
static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
        union ib_flow_spec *flow_spec;

        if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
            ib_attr->num_of_specs < 1)
                return false;

        flow_spec = (union ib_flow_spec *)(ib_attr + 1);
        if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
                struct ib_flow_spec_ipv4 *ipv4_spec;

                ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
                if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
                        return true;

                return false;
        }

        if (flow_spec->type == IB_FLOW_SPEC_ETH) {
                struct ib_flow_spec_eth *eth_spec;

                eth_spec = (struct ib_flow_spec_eth *)flow_spec;
                return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
                       is_multicast_ether_addr(eth_spec->val.dst_mac);
        }

        return false;
}

enum valid_spec {
        VALID_SPEC_INVALID,
        VALID_SPEC_VALID,
        VALID_SPEC_NA,
};

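/*
 * Classify an ESP AES-GCM spec: VALID_SPEC_NA if no crypto action is
 * requested, VALID_SPEC_VALID for a well-formed IPsec crypto rule, and
 * VALID_SPEC_INVALID otherwise (e.g. egress crypto combined with a
 * drop action or a flow tag).
 */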
static enum valid_spec
is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
                     const struct mlx5_flow_spec *spec,
                     const struct mlx5_flow_act *flow_act,
                     bool egress)
{
        const u32 *match_c = spec->match_criteria;
        bool is_crypto =
                (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
                                     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
        bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
        bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;

        /*
         * Currently only crypto is supported in egress; once regular
         * egress rules are supported, keep returning VALID_SPEC_NA for
         * non-crypto specs.
         */
        if (!is_crypto)
                return VALID_SPEC_NA;

        return is_crypto && is_ipsec &&
                (!egress || (!is_drop &&
                             !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
                VALID_SPEC_VALID : VALID_SPEC_INVALID;
}

static bool is_valid_spec(struct mlx5_core_dev *mdev,
                          const struct mlx5_flow_spec *spec,
                          const struct mlx5_flow_act *flow_act,
                          bool egress)
{
        /* We currently only support IPsec egress flows */
        return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
}

static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
                               const struct ib_flow_attr *flow_attr,
                               bool check_inner)
{
        union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
        int match_ipv = check_inner ?
                        MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                        ft_field_support.inner_ip_version) :
                        MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                        ft_field_support.outer_ip_version);
        int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
        bool ipv4_spec_valid, ipv6_spec_valid;
        unsigned int ip_spec_type = 0;
        bool has_ethertype = false;
        unsigned int spec_index;
        bool mask_valid = true;
        u16 eth_type = 0;
        bool type_valid;

        /* Validate that ethertype is correct */
        for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
                if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
                    ib_spec->eth.mask.ether_type) {
                        mask_valid = (ib_spec->eth.mask.ether_type ==
                                      htons(0xffff));
                        has_ethertype = true;
                        eth_type = ntohs(ib_spec->eth.val.ether_type);
                } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
                           (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
                        ip_spec_type = ib_spec->type;
                }
                ib_spec = (void *)ib_spec + ib_spec->size;
        }

        type_valid = (!has_ethertype) || (!ip_spec_type);
        if (!type_valid && mask_valid) {
                ipv4_spec_valid = (eth_type == ETH_P_IP) &&
                        (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
                ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
                        (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));

                type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
                             (((eth_type == ETH_P_MPLS_UC) ||
                               (eth_type == ETH_P_MPLS_MC)) && match_ipv);
        }

        return type_valid;
}

static bool is_valid_attr(struct mlx5_core_dev *mdev,
                          const struct ib_flow_attr *flow_attr)
{
        return is_valid_ethertype(mdev, flow_attr, false) &&
               is_valid_ethertype(mdev, flow_attr, true);
}

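/*
 * Drop one reference on a flow priority's table (only when the caller
 * actually added a rule to it) and destroy the table once the last
 * reference is gone.
 */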
static void put_flow_table(struct mlx5_ib_dev *dev,
                           struct mlx5_ib_flow_prio *prio, bool ft_added)
{
        prio->refcount -= !!ft_added;
        if (!prio->refcount) {
                mlx5_destroy_flow_table(prio->flow_table);
                prio->flow_table = NULL;
        }
}

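/*
 * Tear down a flow and every handler chained to it: delete the hardware
 * rules, release the flow table references, and free the handlers under
 * the flow_db lock.
 */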
static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
        struct mlx5_ib_flow_handler *handler = container_of(flow_id,
                                                          struct mlx5_ib_flow_handler,
                                                          ibflow);
        struct mlx5_ib_flow_handler *iter, *tmp;
        struct mlx5_ib_dev *dev = handler->dev;

        mutex_lock(&dev->flow_db->lock);

        list_for_each_entry_safe(iter, tmp, &handler->list, list) {
                mlx5_del_flow_rules(iter->rule);
                put_flow_table(dev, iter->prio, true);
                list_del(&iter->list);
                kfree(iter);
        }

        mlx5_del_flow_rules(handler->rule);
        put_flow_table(dev, handler->prio, true);
        mlx5_ib_counters_clear_description(handler->ibcounters);
        mutex_unlock(&dev->flow_db->lock);
        if (handler->flow_matcher)
                atomic_dec(&handler->flow_matcher->usecnt);
        kfree(handler);

        return 0;
}

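/*
 * Each IB priority consumes two core priorities: the even slot is used
 * for don't-trap rules and the odd slot for regular rules, so the two
 * kinds never collide in the core namespace.
 */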
static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
        priority *= 2;
        if (!dont_trap)
                priority++;
        return priority;
}

enum flow_table_type {
        MLX5_IB_FT_RX,
        MLX5_IB_FT_TX
};

#define MLX5_FS_MAX_TYPES        6
#define MLX5_FS_MAX_ENTRIES      BIT(16)

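/*
 * Create an auto-grouped flow table for @prio in namespace @ns and
 * attach it to the priority with a zeroed refcount; callers take their
 * reference when they add a rule.
 */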
static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
                                           struct mlx5_ib_flow_prio *prio,
                                           int priority,
                                           int num_entries, int num_groups,
                                           u32 flags)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_table *ft;

        ft_attr.prio = priority;
        ft_attr.max_fte = num_entries;
        ft_attr.flags = flags;
        ft_attr.autogroup.max_num_groups = num_groups;
        ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
        if (IS_ERR(ft))
                return ERR_CAST(ft);

        prio->flow_table = ft;
        prio->refcount = 0;
        return prio;
}

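/*
 * Select the flow namespace, priority slot, and table parameters for a
 * given ib_flow_attr (normal, leftovers, or sniffer) and return the
 * matching priority, creating its flow table on first use.
 */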
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
                                                struct ib_flow_attr *flow_attr,
                                                enum flow_table_type ft_type)
{
        bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
        struct mlx5_flow_namespace *ns = NULL;
        struct mlx5_ib_flow_prio *prio;
        struct mlx5_flow_table *ft;
        int max_table_size;
        int num_entries;
        int num_groups;
        bool esw_encap;
        u32 flags = 0;
        int priority;

        max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                                       log_max_ft_size));
        esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
                DEVLINK_ESWITCH_ENCAP_MODE_NONE;
        if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
                enum mlx5_flow_namespace_type fn_type;

                if (flow_is_multicast_only(flow_attr) &&
                    !dont_trap)
                        priority = MLX5_IB_FLOW_MCAST_PRIO;
                else
                        priority = ib_prio_to_core_prio(flow_attr->priority,
                                                        dont_trap);
                if (ft_type == MLX5_IB_FT_RX) {
                        fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
                        prio = &dev->flow_db->prios[priority];
                        if (!dev->is_rep && !esw_encap &&
                            MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
                        if (!dev->is_rep && !esw_encap &&
                            MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                        reformat_l3_tunnel_to_l2))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
                } else {
                        max_table_size =
                                BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
                                                              log_max_ft_size));
                        fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
                        prio = &dev->flow_db->egress_prios[priority];
                        if (!dev->is_rep && !esw_encap &&
                            MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
                }
                ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
                num_entries = MLX5_FS_MAX_ENTRIES;
                num_groups = MLX5_FS_MAX_TYPES;
        } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
                   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
                ns = mlx5_get_flow_namespace(dev->mdev,
                                             MLX5_FLOW_NAMESPACE_LEFTOVERS);
                build_leftovers_ft_param(&priority,
                                         &num_entries,
                                         &num_groups);
                prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
        } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
                if (!MLX5_CAP_FLOWTABLE(dev->mdev,
                                        allow_sniffer_and_nic_rx_shared_tir))
                        return ERR_PTR(-EOPNOTSUPP);

                ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
                                             MLX5_FLOW_NAMESPACE_SNIFFER_RX :
                                             MLX5_FLOW_NAMESPACE_SNIFFER_TX);

                prio = &dev->flow_db->sniffer[ft_type];
                priority = 0;
                num_entries = 1;
                num_groups = 1;
        }

        if (!ns)
                return ERR_PTR(-EOPNOTSUPP);

        max_table_size = min_t(int, num_entries, max_table_size);

        ft = prio->flow_table;
        if (!ft)
                return _get_prio(ns, prio, priority, max_table_size, num_groups,
                                 flags);

        return prio;
}

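/*
 * When a flow is created on top of an underlay QP, match on the BTH
 * destination QP number so the rule only applies to that QP's traffic,
 * provided the device can match on bth_dst_qp.
 */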
static void set_underlay_qp(struct mlx5_ib_dev *dev,
                            struct mlx5_flow_spec *spec,
                            u32 underlay_qpn)
{
        void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
                                           spec->match_criteria,
                                           misc_parameters);
        void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                           misc_parameters);

        if (underlay_qpn &&
            MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                      ft_field_support.bth_dst_qp)) {
                MLX5_SET(fte_match_set_misc,
                         misc_params_v, bth_dst_qp, underlay_qpn);
                MLX5_SET(fte_match_set_misc,
                         misc_params_c, bth_dst_qp, 0xffffff);
        }
}

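/*
 * Restrict a rule created on a representor to traffic from the rep's
 * vport: match the metadata register when vport metadata matching is
 * enabled on the eswitch, otherwise match the source port directly.
 */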
static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
                                         struct mlx5_flow_spec *spec,
                                         struct mlx5_eswitch_rep *rep)
{
        struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
        void *misc;

        if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters_2);

                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_for_match(esw,
                                                                   rep->vport));
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters_2);

                MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
                         mlx5_eswitch_get_vport_metadata_mask());
        } else {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

                MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);

                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);

                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
        }
}

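/*
 * Core rule-creation path: validate the attribute, parse every spec
 * into one mlx5_flow_spec, wire up the destinations (TIR/port plus an
 * optional counter), derive the final action bits, and install the
 * rule in @ft_prio's table.
 */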
static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
                                                      struct mlx5_ib_flow_prio *ft_prio,
                                                      const struct ib_flow_attr *flow_attr,
                                                      struct mlx5_flow_destination *dst,
                                                      u32 underlay_qpn,
                                                      struct mlx5_ib_create_flow *ucmd)
{
        struct mlx5_flow_table  *ft = ft_prio->flow_table;
        struct mlx5_ib_flow_handler *handler;
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_destination dest_arr[2] = {};
        struct mlx5_flow_destination *rule_dst = dest_arr;
        const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
        unsigned int spec_index;
        u32 prev_type = 0;
        int err = 0;
        int dest_num = 0;
        bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;

        if (!is_valid_attr(dev->mdev, flow_attr))
                return ERR_PTR(-EINVAL);

        if (dev->is_rep && is_egress)
                return ERR_PTR(-EINVAL);

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        handler = kzalloc(sizeof(*handler), GFP_KERNEL);
        if (!handler || !spec) {
                err = -ENOMEM;
                goto free;
        }

        INIT_LIST_HEAD(&handler->list);

        for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
                err = parse_flow_attr(dev->mdev, spec,
                                      ib_flow, flow_attr, &flow_act,
                                      prev_type);
                if (err < 0)
                        goto free;

                prev_type = ((union ib_flow_spec *)ib_flow)->type;
                ib_flow += ((union ib_flow_spec *)ib_flow)->size;
        }

        if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
                memcpy(&dest_arr[0], dst, sizeof(*dst));
                dest_num++;
        }

        if (!flow_is_multicast_only(flow_attr))
                set_underlay_qp(dev, spec, underlay_qpn);

        if (dev->is_rep) {
                struct mlx5_eswitch_rep *rep;

                rep = dev->port[flow_attr->port - 1].rep;
                if (!rep) {
                        err = -EINVAL;
                        goto free;
                }

                mlx5_ib_set_rule_source_port(dev, spec, rep);
        }

        spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);

        if (is_egress &&
            !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
                err = -EINVAL;
                goto free;
        }

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                struct mlx5_ib_mcounters *mcounters;

                err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd);
                if (err)
                        goto free;

                mcounters = to_mcounters(flow_act.counters);
                handler->ibcounters = flow_act.counters;
                dest_arr[dest_num].type =
                        MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest_arr[dest_num].counter_id =
                        mlx5_fc_id(mcounters->hw_cntrs_hndl);
                dest_num++;
        }

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
                if (!dest_num)
                        rule_dst = NULL;
        } else {
                if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
                        flow_act.action |=
                                MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
                if (is_egress)
                        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
                else if (dest_num)
                        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        }

        if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
            (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
             flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
                mlx5_ib_warn(dev, "Flow tag %u and attribute type %x aren't allowed in leftovers\n",
                             spec->flow_context.flow_tag, flow_attr->type);
                err = -EINVAL;
                goto free;
        }
        handler->rule = mlx5_add_flow_rules(ft, spec,
                                            &flow_act,
                                            rule_dst, dest_num);

        if (IS_ERR(handler->rule)) {
                err = PTR_ERR(handler->rule);
                goto free;
        }

        ft_prio->refcount++;
        handler->prio = ft_prio;
        handler->dev = dev;

        ft_prio->flow_table = ft;
free:
        if (err && handler) {
                mlx5_ib_counters_clear_description(handler->ibcounters);
                kfree(handler);
        }
        kvfree(spec);
        return err ? ERR_PTR(err) : handler;
}

static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
                                                     struct mlx5_ib_flow_prio *ft_prio,
                                                     const struct ib_flow_attr *flow_attr,
                                                     struct mlx5_flow_destination *dst)
{
        return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}

enum {
        LEFTOVERS_MC,
        LEFTOVERS_UC,
};

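/*
 * Install the catch-all leftovers rules: a multicast rule (dst_mac
 * multicast bit set) and, for IB_FLOW_ATTR_ALL_DEFAULT, a unicast rule
 * chained to it so both are torn down together.
 */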
static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
                                                          struct mlx5_ib_flow_prio *ft_prio,
                                                          struct ib_flow_attr *flow_attr,
                                                          struct mlx5_flow_destination *dst)
{
        struct mlx5_ib_flow_handler *handler_ucast = NULL;
        struct mlx5_ib_flow_handler *handler = NULL;

        static struct {
                struct ib_flow_attr     flow_attr;
                struct ib_flow_spec_eth eth_flow;
        } leftovers_specs[] = {
                [LEFTOVERS_MC] = {
                        .flow_attr = {
                                .num_of_specs = 1,
                                .size = sizeof(leftovers_specs[0])
                        },
                        .eth_flow = {
                                .type = IB_FLOW_SPEC_ETH,
                                .size = sizeof(struct ib_flow_spec_eth),
                                .mask = {.dst_mac = {0x1} },
                                .val =  {.dst_mac = {0x1} }
                        }
                },
                [LEFTOVERS_UC] = {
                        .flow_attr = {
                                .num_of_specs = 1,
                                .size = sizeof(leftovers_specs[0])
                        },
                        .eth_flow = {
                                .type = IB_FLOW_SPEC_ETH,
                                .size = sizeof(struct ib_flow_spec_eth),
                                .mask = {.dst_mac = {0x1} },
                                .val = {.dst_mac = {} }
                        }
                }
        };

        handler = create_flow_rule(dev, ft_prio,
                                   &leftovers_specs[LEFTOVERS_MC].flow_attr,
                                   dst);
        if (!IS_ERR(handler) &&
            flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
                handler_ucast = create_flow_rule(dev, ft_prio,
                                                 &leftovers_specs[LEFTOVERS_UC].flow_attr,
                                                 dst);
                if (IS_ERR(handler_ucast)) {
                        mlx5_del_flow_rules(handler->rule);
                        ft_prio->refcount--;
                        kfree(handler);
                        handler = handler_ucast;
                } else {
                        list_add(&handler_ucast->list, &handler->list);
                }
        }

        return handler;
}

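/*
 * Install a pair of match-all sniffer rules, one in the RX table and
 * one in the TX table, chained on a single handler so that destroying
 * the flow removes both.
 */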
static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
                                                        struct mlx5_ib_flow_prio *ft_rx,
                                                        struct mlx5_ib_flow_prio *ft_tx,
                                                        struct mlx5_flow_destination *dst)
{
        struct mlx5_ib_flow_handler *handler_rx;
        struct mlx5_ib_flow_handler *handler_tx;
        int err;
        static const struct ib_flow_attr flow_attr = {
                .num_of_specs = 0,
                .size = sizeof(flow_attr)
        };

        handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
        if (IS_ERR(handler_rx)) {
                err = PTR_ERR(handler_rx);
                goto err;
        }

        handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
        if (IS_ERR(handler_tx)) {
                err = PTR_ERR(handler_tx);
                goto err_tx;
        }

        list_add(&handler_tx->list, &handler_rx->list);

        return handler_rx;

err_tx:
        mlx5_del_flow_rules(handler_rx->rule);
        ft_rx->refcount--;
        kfree(handler_rx);
err:
        return ERR_PTR(err);
}

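/*
 * ib_create_flow() entry point for mlx5: validate the user command and
 * attribute flags, resolve the flow table(s) and destination (TIR for
 * ingress, port for egress), and dispatch to the normal, leftovers, or
 * sniffer rule-creation path under the flow_db lock.
 */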
1147 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
1148                                            struct ib_flow_attr *flow_attr,
1149                                            int domain,
1150                                            struct ib_udata *udata)
1151 {
1152         struct mlx5_ib_dev *dev = to_mdev(qp->device);
1153         struct mlx5_ib_qp *mqp = to_mqp(qp);
1154         struct mlx5_ib_flow_handler *handler = NULL;
1155         struct mlx5_flow_destination *dst = NULL;
1156         struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
1157         struct mlx5_ib_flow_prio *ft_prio;
1158         bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
1159         struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
1160         size_t min_ucmd_sz, required_ucmd_sz;
1161         int err;
1162         int underlay_qpn;
1163
1164         if (udata && udata->inlen) {
1165                 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
1166                                 sizeof(ucmd_hdr.reserved);
1167                 if (udata->inlen < min_ucmd_sz)
1168                         return ERR_PTR(-EOPNOTSUPP);
1169
1170                 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
1171                 if (err)
1172                         return ERR_PTR(err);
1173
1174                 /* currently supports only one counters data */
1175                 if (ucmd_hdr.ncounters_data > 1)
1176                         return ERR_PTR(-EINVAL);
1177
1178                 required_ucmd_sz = min_ucmd_sz +
1179                         sizeof(struct mlx5_ib_flow_counters_data) *
1180                         ucmd_hdr.ncounters_data;
1181                 if (udata->inlen > required_ucmd_sz &&
1182                     !ib_is_udata_cleared(udata, required_ucmd_sz,
1183                                          udata->inlen - required_ucmd_sz))
1184                         return ERR_PTR(-EOPNOTSUPP);
1185
1186                 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
1187                 if (!ucmd)
1188                         return ERR_PTR(-ENOMEM);
1189
1190                 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
1191                 if (err)
1192                         goto free_ucmd;
1193         }
1194
1195         if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
1196                 err = -ENOMEM;
1197                 goto free_ucmd;
1198         }
1199
1200         if (domain != IB_FLOW_DOMAIN_USER ||
1201             flow_attr->port > dev->num_ports ||
1202             (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
1203                                   IB_FLOW_ATTR_FLAGS_EGRESS))) {
1204                 err = -EINVAL;
1205                 goto free_ucmd;
1206         }
1207
1208         if (is_egress &&
1209             (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
1210              flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
1211                 err = -EINVAL;
1212                 goto free_ucmd;
1213         }
1214
1215         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1216         if (!dst) {
1217                 err = -ENOMEM;
1218                 goto free_ucmd;
1219         }
1220
1221         mutex_lock(&dev->flow_db->lock);
1222
1223         ft_prio = get_flow_table(dev, flow_attr,
1224                                  is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
1225         if (IS_ERR(ft_prio)) {
1226                 err = PTR_ERR(ft_prio);
1227                 goto unlock;
1228         }
1229         if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
1230                 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
1231                 if (IS_ERR(ft_prio_tx)) {
1232                         err = PTR_ERR(ft_prio_tx);
1233                         ft_prio_tx = NULL;
1234                         goto destroy_ft;
1235                 }
1236         }
1237
1238         if (is_egress) {
1239                 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
1240         } else {
1241                 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1242                 if (mqp->is_rss)
1243                         dst->tir_num = mqp->rss_qp.tirn;
1244                 else
1245                         dst->tir_num = mqp->raw_packet_qp.rq.tirn;
1246         }
1247
1248         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1249                 underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
1250                                        mqp->underlay_qpn :
1251                                        0;
1252                 handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
1253                                             underlay_qpn, ucmd);
1254         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
1255                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
1256                 handler = create_leftovers_rule(dev, ft_prio, flow_attr,
1257                                                 dst);
1258         } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
1259                 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
1260         } else {
1261                 err = -EINVAL;
1262                 goto destroy_ft;
1263         }
1264
1265         if (IS_ERR(handler)) {
1266                 err = PTR_ERR(handler);
1267                 handler = NULL;
1268                 goto destroy_ft;
1269         }
1270
1271         mutex_unlock(&dev->flow_db->lock);
1272         kfree(dst);
1273         kfree(ucmd);
1274
1275         return &handler->ibflow;
1276
1277 destroy_ft:
1278         put_flow_table(dev, ft_prio, false);
1279         if (ft_prio_tx)
1280                 put_flow_table(dev, ft_prio_tx, false);
1281 unlock:
1282         mutex_unlock(&dev->flow_db->lock);
1283         kfree(dst);
1284 free_ucmd:
1285         kfree(ucmd);
1286         return ERR_PTR(err);
1287 }
1288
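/*
 * Resolve the flow table priority for a matcher-based (raw) rule.
 * The matcher's namespace type selects the cached prio slot in
 * flow_db and the capability fields consulted: the maximal table
 * size is derived from log_max_ft_size, and tunnel decap/reformat
 * flags are set according to the device caps and the current eswitch
 * encap mode.  The table itself is created lazily by _get_prio().
 */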
1289 static struct mlx5_ib_flow_prio *
1290 _get_flow_table(struct mlx5_ib_dev *dev,
1291                 struct mlx5_ib_flow_matcher *fs_matcher,
1292                 bool mcast)
1293 {
1294         struct mlx5_flow_namespace *ns = NULL;
1295         struct mlx5_ib_flow_prio *prio = NULL;
1296         int max_table_size = 0;
1297         bool esw_encap;
1298         u32 flags = 0;
1299         int priority;
1300
1301         if (mcast)
1302                 priority = MLX5_IB_FLOW_MCAST_PRIO;
1303         else
1304                 priority = ib_prio_to_core_prio(fs_matcher->priority, false);
1305
1306         esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
1307                 DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1308         if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
1309                 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
1310                                         log_max_ft_size));
1311                 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
1312                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
1313                 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
1314                                               reformat_l3_tunnel_to_l2) &&
1315                     !esw_encap)
1316                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
1317         } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
1318                 max_table_size = BIT(
1319                         MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
1320                 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
1321                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
1322         } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
1323                 max_table_size = BIT(
1324                         MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
1325                 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
1326                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
1327                 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
1328                     esw_encap)
1329                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
1330                 priority = FDB_BYPASS_PATH;
1331         } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
1332                 max_table_size =
1333                         BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
1334                                                        log_max_ft_size));
1335                 priority = fs_matcher->priority;
1336         } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
1337                 max_table_size =
1338                         BIT(MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
1339                                                        log_max_ft_size));
1340                 priority = fs_matcher->priority;
1341         }
1342
1343         max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
1344
1345         ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
1346         if (!ns)
1347                 return ERR_PTR(-EOPNOTSUPP);
1348
1349         if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
1350                 prio = &dev->flow_db->prios[priority];
1351         else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
1352                 prio = &dev->flow_db->egress_prios[priority];
1353         else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
1354                 prio = &dev->flow_db->fdb;
1355         else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX)
1356                 prio = &dev->flow_db->rdma_rx[priority];
1357         else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX)
1358                 prio = &dev->flow_db->rdma_tx[priority];
1359
1360         if (!prio)
1361                 return ERR_PTR(-EINVAL);
1362
1363         if (prio->flow_table)
1364                 return prio;
1365
1366         return _get_prio(ns, prio, priority, max_table_size,
1367                          MLX5_FS_MAX_TYPES, flags);
1368 }
1369
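/*
 * Install a raw rule: the match value is taken verbatim from the user
 * (cmd_in), while the criteria mask and match_criteria_enable bits
 * come from the matcher the rule is created against.  On success a
 * reference is taken on the table priority so it outlives the rule.
 */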
1370 static struct mlx5_ib_flow_handler *
1371 _create_raw_flow_rule(struct mlx5_ib_dev *dev,
1372                       struct mlx5_ib_flow_prio *ft_prio,
1373                       struct mlx5_flow_destination *dst,
1374                       struct mlx5_ib_flow_matcher  *fs_matcher,
1375                       struct mlx5_flow_context *flow_context,
1376                       struct mlx5_flow_act *flow_act,
1377                       void *cmd_in, int inlen,
1378                       int dst_num)
1379 {
1380         struct mlx5_ib_flow_handler *handler;
1381         struct mlx5_flow_spec *spec;
1382         struct mlx5_flow_table *ft = ft_prio->flow_table;
1383         int err = 0;
1384
1385         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1386         handler = kzalloc(sizeof(*handler), GFP_KERNEL);
1387         if (!handler || !spec) {
1388                 err = -ENOMEM;
1389                 goto free;
1390         }
1391
1392         INIT_LIST_HEAD(&handler->list);
1393
1394         memcpy(spec->match_value, cmd_in, inlen);
1395         memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
1396                fs_matcher->mask_len);
1397         spec->match_criteria_enable = fs_matcher->match_criteria_enable;
1398         spec->flow_context = *flow_context;
1399
1400         handler->rule = mlx5_add_flow_rules(ft, spec,
1401                                             flow_act, dst, dst_num);
1402
1403         if (IS_ERR(handler->rule)) {
1404                 err = PTR_ERR(handler->rule);
1405                 goto free;
1406         }
1407
1408         ft_prio->refcount++;
1409         handler->prio = ft_prio;
1410         handler->dev = dev;
1411         ft_prio->flow_table = ft;
1412
1413 free:
1414         if (err)
1415                 kfree(handler);
1416         kvfree(spec);
1417         return err ? ERR_PTR(err) : handler;
1418 }
1419
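/*
 * A raw rule is steered to the multicast priority when both the
 * matcher mask and the match value select a multicast destination,
 * either by destination MAC or by destination IPv4 address.
 */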
1420 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
1421                                 void *match_v)
1422 {
1423         void *match_c;
1424         void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
1425         void *dmac, *dmac_mask;
1426         void *ipv4, *ipv4_mask;
1427
1428         if (!(fs_matcher->match_criteria_enable &
1429               (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
1430                 return false;
1431
1432         match_c = fs_matcher->matcher_mask.match_params;
1433         match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
1434                                            outer_headers);
1435         match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
1436                                            outer_headers);
1437
1438         dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
1439                             dmac_47_16);
1440         dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
1441                                  dmac_47_16);
1442
1443         if (is_multicast_ether_addr(dmac) &&
1444             is_multicast_ether_addr(dmac_mask))
1445                 return true;
1446
1447         ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
1448                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1449
1450         ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
1451                                  dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1452
1453         if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
1454             ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
1455                 return true;
1456
1457         return false;
1458 }
1459
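/*
 * Common insertion path for matcher-based rules.  Up to two
 * destinations are built: the forward destination (TIR, flow table
 * or port) and, when MLX5_FLOW_CONTEXT_ACTION_COUNT is set, a flow
 * counter.  A reference is held on the matcher for the lifetime of
 * the rule.
 */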
1460 static struct mlx5_ib_flow_handler *raw_fs_rule_add(
1461         struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
1462         struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
1463         u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
1464 {
1465         struct mlx5_flow_destination *dst;
1466         struct mlx5_ib_flow_prio *ft_prio;
1467         struct mlx5_ib_flow_handler *handler;
1468         int dst_num = 0;
1469         bool mcast;
1470         int err;
1471
1472         if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
1473                 return ERR_PTR(-EOPNOTSUPP);
1474
1475         if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
1476                 return ERR_PTR(-ENOMEM);
1477
1478         dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
1479         if (!dst)
1480                 return ERR_PTR(-ENOMEM);
1481
1482         mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
1483         mutex_lock(&dev->flow_db->lock);
1484
1485         ft_prio = _get_flow_table(dev, fs_matcher, mcast);
1486         if (IS_ERR(ft_prio)) {
1487                 err = PTR_ERR(ft_prio);
1488                 goto unlock;
1489         }
1490
1491         if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
1492                 dst[dst_num].type = dest_type;
1493                 dst[dst_num++].tir_num = dest_id;
1494                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1495         } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
1496                 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
1497                 dst[dst_num++].ft_num = dest_id;
1498                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1499         } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
1500                 dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
1501                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1502         }
1503
1505         if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1506                 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1507                 dst[dst_num].counter_id = counter_id;
1508                 dst_num++;
1509         }
1510
1511         handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
1512                                         flow_context, flow_act,
1513                                         cmd_in, inlen, dst_num);
1514
1515         if (IS_ERR(handler)) {
1516                 err = PTR_ERR(handler);
1517                 goto destroy_ft;
1518         }
1519
1520         mutex_unlock(&dev->flow_db->lock);
1521         atomic_inc(&fs_matcher->usecnt);
1522         handler->flow_matcher = fs_matcher;
1523
1524         kfree(dst);
1525
1526         return handler;
1527
1528 destroy_ft:
1529         put_flow_table(dev, ft_prio, false);
1530 unlock:
1531         mutex_unlock(&dev->flow_db->lock);
1532         kfree(dst);
1533
1534         return ERR_PTR(err);
1535 }
1536
1537 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
1538 {
1539         u32 flags = 0;
1540
1541         if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
1542                 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
1543
1544         return flags;
1545 }
1546
1547 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED                             \
1548         MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
1549 static struct ib_flow_action *
1550 mlx5_ib_create_flow_action_esp(struct ib_device *device,
1551                                const struct ib_flow_action_attrs_esp *attr,
1552                                struct uverbs_attr_bundle *attrs)
1553 {
1554         struct mlx5_ib_dev *mdev = to_mdev(device);
1555         struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
1556         struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
1557         struct mlx5_ib_flow_action *action;
1558         u64 action_flags;
1559         u64 flags;
1560         int err = 0;
1561
1562         err = uverbs_get_flags64(
1563                 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
1564                 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
1565         if (err)
1566                 return ERR_PTR(err);
1567
1568         flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
1569
1570         /* We currently only support a subset of the standard features: only a
1571          * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and ESN
1572          * (with overlap). Full offload mode isn't supported.
1573          */
1574         if (!attr->keymat || attr->replay || attr->encap ||
1575             attr->spi || attr->seq || attr->tfc_pad ||
1576             attr->hard_limit_pkts ||
1577             (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
1578                              IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
1579                 return ERR_PTR(-EOPNOTSUPP);
1580
1581         if (attr->keymat->protocol !=
1582             IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
1583                 return ERR_PTR(-EOPNOTSUPP);
1584
1585         aes_gcm = &attr->keymat->keymat.aes_gcm;
1586
1587         if (aes_gcm->icv_len != 16 ||
1588             aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
1589                 return ERR_PTR(-EOPNOTSUPP);
1590
1591         action = kmalloc(sizeof(*action), GFP_KERNEL);
1592         if (!action)
1593                 return ERR_PTR(-ENOMEM);
1594
1595         action->esp_aes_gcm.ib_flags = attr->flags;
1596         memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
1597                sizeof(accel_attrs.keymat.aes_gcm.aes_key));
1598         accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
1599         memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
1600                sizeof(accel_attrs.keymat.aes_gcm.salt));
1601         memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
1602                sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
1603         accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
1604         accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
1605         accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
1606
1607         accel_attrs.esn = attr->esn;
1608         if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
1609                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
1610         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
1611                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
1612
1613         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
1614                 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
1615
1616         action->esp_aes_gcm.ctx =
1617                 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
1618         if (IS_ERR(action->esp_aes_gcm.ctx)) {
1619                 err = PTR_ERR(action->esp_aes_gcm.ctx);
1620                 goto err_parse;
1621         }
1622
1623         action->esp_aes_gcm.ib_flags = attr->flags;
1624
1625         return &action->ib_action;
1626
1627 err_parse:
1628         kfree(action);
1629         return ERR_PTR(err);
1630 }
1631
1632 static int
1633 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
1634                                const struct ib_flow_action_attrs_esp *attr,
1635                                struct uverbs_attr_bundle *attrs)
1636 {
1637         struct mlx5_ib_flow_action *maction = to_mflow_act(action);
1638         struct mlx5_accel_esp_xfrm_attrs accel_attrs;
1639         int err = 0;
1640
1641         if (attr->keymat || attr->replay || attr->encap ||
1642             attr->spi || attr->seq || attr->tfc_pad ||
1643             attr->hard_limit_pkts ||
1644             (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
1645                              IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
1646                              IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
1647                 return -EOPNOTSUPP;
1648
1649         /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP flag
1650          * can be modified.
1651          */
1652         if (!(maction->esp_aes_gcm.ib_flags &
1653               IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
1654             attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
1655                            IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
1656                 return -EINVAL;
1657
1658         memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
1659                sizeof(accel_attrs));
1660
1661         accel_attrs.esn = attr->esn;
1662         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
1663                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
1664         else
1665                 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
1666
1667         err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
1668                                          &accel_attrs);
1669         if (err)
1670                 return err;
1671
1672         maction->esp_aes_gcm.ib_flags &=
1673                 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
1674         maction->esp_aes_gcm.ib_flags |=
1675                 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
1676
1677         return 0;
1678 }
1679
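/*
 * Release the device resources behind a raw flow action.  DECAP
 * carries no device state, so there is nothing to free for it.
 */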
1680 static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
1681 {
1682         switch (maction->flow_action_raw.sub_type) {
1683         case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
1684                 mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
1685                                            maction->flow_action_raw.modify_hdr);
1686                 break;
1687         case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
1688                 mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
1689                                              maction->flow_action_raw.pkt_reformat);
1690                 break;
1691         case MLX5_IB_FLOW_ACTION_DECAP:
1692                 break;
1693         default:
1694                 break;
1695         }
1696 }
1697
1698 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
1699 {
1700         struct mlx5_ib_flow_action *maction = to_mflow_act(action);
1701
1702         switch (action->type) {
1703         case IB_FLOW_ACTION_ESP:
1704                 /*
1705                  * We only support aes_gcm for now, so we implicitly know this
1706                  * is the underlying crypto.
1707                  */
1708                 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
1709                 break;
1710         case IB_FLOW_ACTION_UNSPECIFIED:
1711                 destroy_flow_action_raw(maction);
1712                 break;
1713         default:
1714                 WARN_ON(true);
1715                 break;
1716         }
1717
1718         kfree(maction);
1719         return 0;
1720 }
1721
1722 static int
1723 mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
1724                              enum mlx5_flow_namespace_type *namespace)
1725 {
1726         switch (table_type) {
1727         case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
1728                 *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
1729                 break;
1730         case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
1731                 *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
1732                 break;
1733         case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
1734                 *namespace = MLX5_FLOW_NAMESPACE_FDB;
1735                 break;
1736         case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
1737                 *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
1738                 break;
1739         case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
1740                 *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
1741                 break;
1742         default:
1743                 return -EINVAL;
1744         }
1745
1746         return 0;
1747 }
1748
1749 static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
1750         [MLX5_IB_FLOW_TYPE_NORMAL] = {
1751                 .type = UVERBS_ATTR_TYPE_PTR_IN,
1752                 .u.ptr = {
1753                         .len = sizeof(u16), /* data is priority */
1754                         .min_len = sizeof(u16),
1755                 }
1756         },
1757         [MLX5_IB_FLOW_TYPE_SNIFFER] = {
1758                 .type = UVERBS_ATTR_TYPE_PTR_IN,
1759                 UVERBS_ATTR_NO_DATA(),
1760         },
1761         [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
1762                 .type = UVERBS_ATTR_TYPE_PTR_IN,
1763                 UVERBS_ATTR_NO_DATA(),
1764         },
1765         [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
1766                 .type = UVERBS_ATTR_TYPE_PTR_IN,
1767                 UVERBS_ATTR_NO_DATA(),
1768         },
1769 };
1770
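/*
 * A DEVX object may serve as a flow destination.  Its kind is
 * recognized by peeking at the opcode of its pre-built destroy
 * command (dinbox): a TIR or a flow table qualifies, anything else
 * does not.
 */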
1771 static bool is_flow_dest(void *obj, int *dest_id, int *dest_type)
1772 {
1773         struct devx_obj *devx_obj = obj;
1774         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
1775
1776         switch (opcode) {
1777         case MLX5_CMD_OP_DESTROY_TIR:
1778                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1779                 *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
1780                                     obj_id);
1781                 return true;
1782
1783         case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
1784                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1785                 *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
1786                                     table_id);
1787                 return true;
1788         default:
1789                 return false;
1790         }
1791 }
1792
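/*
 * Decode the destination attributes of a CREATE_FLOW request and
 * cross-validate them against the matcher's namespace: DEVX, QP,
 * default-miss and drop are mutually constrained per namespace, and
 * the egress namespaces may only target the port.
 */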
1793 static int get_dests(struct uverbs_attr_bundle *attrs,
1794                      struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
1795                      int *dest_type, struct ib_qp **qp, u32 *flags)
1796 {
1797         bool dest_devx, dest_qp;
1798         void *devx_obj;
1799         int err;
1800
1801         dest_devx = uverbs_attr_is_valid(attrs,
1802                                          MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
1803         dest_qp = uverbs_attr_is_valid(attrs,
1804                                        MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
1805
1806         *flags = 0;
1807         err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
1808                                  MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
1809                                          MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
1810         if (err)
1811                 return err;
1812
1813         /* The two flags are mutually exclusive */
1814         if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
1815             *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
1816                 return -EINVAL;
1817
1818         if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
1819                 if (dest_devx && (dest_qp || *flags))
1820                         return -EINVAL;
1821                 else if (dest_qp && *flags)
1822                         return -EINVAL;
1823         }
1824
1825         /* For FDB, allow only a DEVX object or drop as the destination */
1826         if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx ||
1827              (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
1828                 return -EINVAL;
1829
1830         /* Allow only a DEVX object or a QP as dest when inserting to RDMA_RX */
1831         if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
1832             ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
1833                 return -EINVAL;
1834
1835         *qp = NULL;
1836         if (dest_devx) {
1837                 devx_obj =
1838                         uverbs_attr_get_obj(attrs,
1839                                             MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
1840
1841                 /* Verify that the given DEVX object is a flow
1842                  * steering destination.
1843                  */
1844                 if (!is_flow_dest(devx_obj, dest_id, dest_type))
1845                         return -EINVAL;
1846                 /* Allow only a flow table as dest when inserting to FDB or RDMA_RX */
1847                 if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
1848                      fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
1849                     *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1850                         return -EINVAL;
1851         } else if (dest_qp) {
1852                 struct mlx5_ib_qp *mqp;
1853
1854                 *qp = uverbs_attr_get_obj(attrs,
1855                                           MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
1856                 if (IS_ERR(*qp))
1857                         return PTR_ERR(*qp);
1858
1859                 if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
1860                         return -EINVAL;
1861
1862                 mqp = to_mqp(*qp);
1863                 if (mqp->is_rss)
1864                         *dest_id = mqp->rss_qp.tirn;
1865                 else
1866                         *dest_id = mqp->raw_packet_qp.rq.tirn;
1867                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1868         } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
1869                    fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
1870                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
1871         }
1872
1873         if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1874             (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
1875              fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX))
1876                 return -EINVAL;
1877
1878         return 0;
1879 }
1880
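/*
 * Like is_flow_dest(), recognize a DEVX flow counter by its destroy
 * opcode.  @offset selects one counter within a bulk allocation and
 * must stay below the bulk size.
 */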
1881 static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
1882 {
1883         struct devx_obj *devx_obj = obj;
1884         u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
1885
1886         if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
1888                 if (offset && offset >= devx_obj->flow_counter_bulk_size)
1889                         return false;
1890
1891                 *counter_id = MLX5_GET(dealloc_flow_counter_in,
1892                                        devx_obj->dinbox,
1893                                        flow_counter_id);
1894                 *counter_id += offset;
1895                 return true;
1896         }
1897
1898         return false;
1899 }
1900
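/*
 * Handler for MLX5_IB_METHOD_CREATE_FLOW.  Requires CAP_NET_RAW,
 * collects the matcher, destination, optional flow counter (with an
 * optional offset into a counter bulk), flow actions and flow tag
 * from the attribute bundle, then hands everything to
 * raw_fs_rule_add().
 */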
1901 #define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
1902 static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
1903         struct uverbs_attr_bundle *attrs)
1904 {
1905         struct mlx5_flow_context flow_context = {.flow_tag =
1906                 MLX5_FS_DEFAULT_FLOW_TAG};
1907         u32 *offset_attr, offset = 0, counter_id = 0;
1908         int dest_id, dest_type = -1, inlen, len, ret, i;
1909         struct mlx5_ib_flow_handler *flow_handler;
1910         struct mlx5_ib_flow_matcher *fs_matcher;
1911         struct ib_uobject **arr_flow_actions;
1912         struct ib_uflow_resources *uflow_res;
1913         struct mlx5_flow_act flow_act = {};
1914         struct ib_qp *qp = NULL;
1915         void *devx_obj, *cmd_in;
1916         struct ib_uobject *uobj;
1917         struct mlx5_ib_dev *dev;
1918         u32 flags;
1919
1920         if (!capable(CAP_NET_RAW))
1921                 return -EPERM;
1922
1923         fs_matcher = uverbs_attr_get_obj(attrs,
1924                                          MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
1925         uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
1926         dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1927
1928         if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
1929                 return -EINVAL;
1930
1931         if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
1932                 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
1933
1934         if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
1935                 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1936
1937         len = uverbs_attr_get_uobjs_arr(attrs,
1938                 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
1939         if (len) {
1940                 devx_obj = arr_flow_actions[0]->object;
1941
1942                 if (uverbs_attr_is_valid(attrs,
1943                                          MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {
1945                         int num_offsets = uverbs_attr_ptr_get_array_size(
1946                                 attrs,
1947                                 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
1948                                 sizeof(u32));
1949
1950                         if (num_offsets != 1)
1951                                 return -EINVAL;
1952
1953                         offset_attr = uverbs_attr_get_alloced_ptr(
1954                                 attrs,
1955                                 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
1956                         offset = *offset_attr;
1957                 }
1958
1959                 if (!is_flow_counter(devx_obj, offset, &counter_id))
1960                         return -EINVAL;
1961
1962                 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1963         }
1964
1965         cmd_in = uverbs_attr_get_alloced_ptr(
1966                 attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
1967         inlen = uverbs_attr_get_len(attrs,
1968                                     MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
1969
1970         uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
1971         if (!uflow_res)
1972                 return -ENOMEM;
1973
1974         len = uverbs_attr_get_uobjs_arr(attrs,
1975                 MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
1976         for (i = 0; i < len; i++) {
1977                 struct mlx5_ib_flow_action *maction =
1978                         to_mflow_act(arr_flow_actions[i]->object);
1979
1980                 ret = parse_flow_flow_action(maction, false, &flow_act);
1981                 if (ret)
1982                         goto err_out;
1983                 flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
1984                                    arr_flow_actions[i]->object);
1985         }
1986
1987         ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
1988                                MLX5_IB_ATTR_CREATE_FLOW_TAG);
1989         if (!ret) {
1990                 if (flow_context.flow_tag >= BIT(24)) {
1991                         ret = -EINVAL;
1992                         goto err_out;
1993                 }
1994                 flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
1995         }
1996
1997         flow_handler =
1998                 raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
1999                                 counter_id, cmd_in, inlen, dest_id, dest_type);
2000         if (IS_ERR(flow_handler)) {
2001                 ret = PTR_ERR(flow_handler);
2002                 goto err_out;
2003         }
2004
2005         ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);
2006
2007         return 0;
2008 err_out:
2009         ib_uverbs_flow_resources_free(uflow_res);
2010         return ret;
2011 }
2012
2013 static int flow_matcher_cleanup(struct ib_uobject *uobject,
2014                                 enum rdma_remove_reason why,
2015                                 struct uverbs_attr_bundle *attrs)
2016 {
2017         struct mlx5_ib_flow_matcher *obj = uobject->object;
2018         int ret;
2019
2020         ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
2021         if (ret)
2022                 return ret;
2023
2024         kfree(obj);
2025         return 0;
2026 }
2027
2028 static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
2029                               struct mlx5_ib_flow_matcher *obj)
2030 {
2031         enum mlx5_ib_uapi_flow_table_type ft_type =
2032                 MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
2033         u32 flags;
2034         int err;
2035
2036         /* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE; older users
2037          * should switch to it. The flags attribute is kept only to avoid
2038          * breaking existing userspace. */
2039         if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
2040             uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
2041                 return -EINVAL;
2042
2043         if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
2044                 err = uverbs_get_const(&ft_type, attrs,
2045                                        MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
2046                 if (err)
2047                         return err;
2048
2049                 err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
2050                 if (err)
2051                         return err;
2052
2053                 return 0;
2054         }
2055
2056         if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
2057                 err = uverbs_get_flags32(&flags, attrs,
2058                                          MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
2059                                          IB_FLOW_ATTR_FLAGS_EGRESS);
2060                 if (err)
2061                         return err;
2062
2063                 if (flags) {
2064                         mlx5_ib_ft_type_to_namespace(
2065                                 MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
2066                                 &obj->ns_type);
2067                         return 0;
2068                 }
2069         }
2070
2071         obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
2072
2073         return 0;
2074 }
2075
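/*
 * Handler for MLX5_IB_METHOD_FLOW_MATCHER_CREATE.  A matcher captures
 * the match mask, the criteria enable bits, the flow type (plus a
 * priority for NORMAL rules) and the target namespace; rules are
 * later created against it via MLX5_IB_METHOD_CREATE_FLOW.
 */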
2076 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
2077         struct uverbs_attr_bundle *attrs)
2078 {
2079         struct ib_uobject *uobj = uverbs_attr_get_uobject(
2080                 attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
2081         struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
2082         struct mlx5_ib_flow_matcher *obj;
2083         int err;
2084
2085         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
2086         if (!obj)
2087                 return -ENOMEM;
2088
2089         obj->mask_len = uverbs_attr_get_len(
2090                 attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
2091         err = uverbs_copy_from(&obj->matcher_mask,
2092                                attrs,
2093                                MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
2094         if (err)
2095                 goto end;
2096
2097         obj->flow_type = uverbs_attr_get_enum_id(
2098                 attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
2099
2100         if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
2101                 err = uverbs_copy_from(&obj->priority,
2102                                        attrs,
2103                                        MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
2104                 if (err)
2105                         goto end;
2106         }
2107
2108         err = uverbs_copy_from(&obj->match_criteria_enable,
2109                                attrs,
2110                                MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
2111         if (err)
2112                 goto end;
2113
2114         err = mlx5_ib_matcher_ns(attrs, obj);
2115         if (err)
2116                 goto end;
2117
2118         uobj->object = obj;
2119         obj->mdev = dev->mdev;
2120         atomic_set(&obj->usecnt, 0);
2121         return 0;
2122
2123 end:
2124         kfree(obj);
2125         return err;
2126 }
2127
2128 static struct ib_flow_action *
2129 mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
2130                              enum mlx5_ib_uapi_flow_table_type ft_type,
2131                              u8 num_actions, void *in)
2132 {
2133         enum mlx5_flow_namespace_type namespace;
2134         struct mlx5_ib_flow_action *maction;
2135         int ret;
2136
2137         ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
2138         if (ret)
2139                 return ERR_PTR(-EINVAL);
2140
2141         maction = kzalloc(sizeof(*maction), GFP_KERNEL);
2142         if (!maction)
2143                 return ERR_PTR(-ENOMEM);
2144
2145         maction->flow_action_raw.modify_hdr =
2146                 mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);
2147
2148         if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
2149                 ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
2150                 kfree(maction);
2151                 return ERR_PTR(ret);
2152         }
2153         maction->flow_action_raw.sub_type =
2154                 MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
2155         maction->flow_action_raw.dev = dev;
2156
2157         return &maction->ib_action;
2158 }
2159
2160 static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
2161 {
2162         return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2163                                          max_modify_header_actions) ||
2164                MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
2165                                          max_modify_header_actions) ||
2166                MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
2167                                          max_modify_header_actions);
2168 }
2169
2170 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
2171         struct uverbs_attr_bundle *attrs)
2172 {
2173         struct ib_uobject *uobj = uverbs_attr_get_uobject(
2174                 attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
2175         struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
2176         enum mlx5_ib_uapi_flow_table_type ft_type;
2177         struct ib_flow_action *action;
2178         int num_actions;
2179         void *in;
2180         int ret;
2181
2182         if (!mlx5_ib_modify_header_supported(mdev))
2183                 return -EOPNOTSUPP;
2184
2185         in = uverbs_attr_get_alloced_ptr(attrs,
2186                 MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
2187
2188         num_actions = uverbs_attr_ptr_get_array_size(
2189                 attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
2190                 MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
2191         if (num_actions < 0)
2192                 return num_actions;
2193
2194         ret = uverbs_get_const(&ft_type, attrs,
2195                                MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
2196         if (ret)
2197                 return ret;
2198         action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
2199         if (IS_ERR(action))
2200                 return PTR_ERR(action);
2201
2202         uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
2203                                        IB_FLOW_ACTION_UNSPECIFIED);
2204
2205         return 0;
2206 }
2207
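/*
 * Each packet reformat type is only meaningful in one direction and
 * requires a matching device capability: the encap variants apply on
 * the TX side, the decap variants on the RX side.
 */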
2208 static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
2209                                                       u8 packet_reformat_type,
2210                                                       u8 ft_type)
2211 {
2212         switch (packet_reformat_type) {
2213         case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
2214                 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
2215                         return MLX5_CAP_FLOWTABLE(ibdev->mdev,
2216                                                   encap_general_header);
2217                 break;
2218         case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
2219                 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
2220                         return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
2221                                 reformat_l2_to_l3_tunnel);
2222                 break;
2223         case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
2224                 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
2225                         return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
2226                                 reformat_l3_tunnel_to_l2);
2227                 break;
2228         case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
2229                 if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
2230                         return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
2231                 break;
2232         default:
2233                 break;
2234         }
2235
2236         return false;
2237 }
2238
2239 static int mlx5_ib_dv_to_prm_packet_reformat_type(u8 dv_prt, u8 *prm_prt)
2240 {
2241         switch (dv_prt) {
2242         case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
2243                 *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
2244                 break;
2245         case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
2246                 *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2247                 break;
2248         case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
2249                 *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
2250                 break;
2251         default:
2252                 return -EINVAL;
2253         }
2254
2255         return 0;
2256 }
2257
2258 static int mlx5_ib_flow_action_create_packet_reformat_ctx(
2259         struct mlx5_ib_dev *dev,
2260         struct mlx5_ib_flow_action *maction,
2261         u8 ft_type, u8 dv_prt,
2262         void *in, size_t len)
2263 {
2264         enum mlx5_flow_namespace_type namespace;
2265         u8 prm_prt;
2266         int ret;
2267
2268         ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
2269         if (ret)
2270                 return ret;
2271
2272         ret = mlx5_ib_dv_to_prm_packet_reformat_type(dv_prt, &prm_prt);
2273         if (ret)
2274                 return ret;
2275
2276         maction->flow_action_raw.pkt_reformat =
2277                 mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
2278                                            in, namespace);
2279         if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
2280                 ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
2281                 return ret;
2282         }
2283
2284         maction->flow_action_raw.sub_type =
2285                 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
2286         maction->flow_action_raw.dev = dev;
2287
2288         return 0;
2289 }
2290
2291 static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
2292         struct uverbs_attr_bundle *attrs)
2293 {
2294         struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
2295                 MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
2296         struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
2297         enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
2298         enum mlx5_ib_uapi_flow_table_type ft_type;
2299         struct mlx5_ib_flow_action *maction;
2300         int ret;
2301
2302         ret = uverbs_get_const(&ft_type, attrs,
2303                                MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
2304         if (ret)
2305                 return ret;
2306
2307         ret = uverbs_get_const(&dv_prt, attrs,
2308                                MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
2309         if (ret)
2310                 return ret;
2311
2312         if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
2313                 return -EOPNOTSUPP;
2314
2315         maction = kzalloc(sizeof(*maction), GFP_KERNEL);
2316         if (!maction)
2317                 return -ENOMEM;
2318
2319         if (dv_prt ==
2320             MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
2321                 maction->flow_action_raw.sub_type =
2322                         MLX5_IB_FLOW_ACTION_DECAP;
2323                 maction->flow_action_raw.dev = mdev;
2324         } else {
2325                 void *in;
2326                 int len;
2327
2328                 in = uverbs_attr_get_alloced_ptr(attrs,
2329                         MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
2330                 if (IS_ERR(in)) {
2331                         ret = PTR_ERR(in);
2332                         goto free_maction;
2333                 }
2334
2335                 len = uverbs_attr_get_len(attrs,
2336                         MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
2337
2338                 ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
2339                         maction, ft_type, dv_prt, in, len);
2340                 if (ret)
2341                         goto free_maction;
2342         }
2343
2344         uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
2345                                        IB_FLOW_ACTION_UNSPECIFIED);
2346         return 0;
2347
2348 free_maction:
2349         kfree(maction);
2350         return ret;
2351 }
2352
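/*
 * The uverbs method and attribute trees below define the
 * driver-specific ioctl ABI consumed by userspace providers such as
 * rdma-core's mlx5 driver.
 */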
2353 DECLARE_UVERBS_NAMED_METHOD(
2354         MLX5_IB_METHOD_CREATE_FLOW,
2355         UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
2356                         UVERBS_OBJECT_FLOW,
2357                         UVERBS_ACCESS_NEW,
2358                         UA_MANDATORY),
2359         UVERBS_ATTR_PTR_IN(
2360                 MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
2361                 UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
2362                 UA_MANDATORY,
2363                 UA_ALLOC_AND_COPY),
2364         UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
2365                         MLX5_IB_OBJECT_FLOW_MATCHER,
2366                         UVERBS_ACCESS_READ,
2367                         UA_MANDATORY),
2368         UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
2369                         UVERBS_OBJECT_QP,
2370                         UVERBS_ACCESS_READ),
2371         UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
2372                         MLX5_IB_OBJECT_DEVX_OBJ,
2373                         UVERBS_ACCESS_READ),
2374         UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
2375                              UVERBS_OBJECT_FLOW_ACTION,
2376                              UVERBS_ACCESS_READ, 1,
2377                              MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
2378                              UA_OPTIONAL),
2379         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
2380                            UVERBS_ATTR_TYPE(u32),
2381                            UA_OPTIONAL),
2382         UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
2383                              MLX5_IB_OBJECT_DEVX_OBJ,
2384                              UVERBS_ACCESS_READ, 1, 1,
2385                              UA_OPTIONAL),
2386         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
2387                            UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
2388                            UA_OPTIONAL,
2389                            UA_ALLOC_AND_COPY),
2390         UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
2391                              enum mlx5_ib_create_flow_flags,
2392                              UA_OPTIONAL));
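/*
 * For illustration only: a rough sketch of how userspace reaches the
 * MLX5_IB_METHOD_CREATE_FLOW handler above through rdma-core's mlx5dv
 * wrappers.  The struct fields shown are quoted from memory and are
 * not part of this file; see infiniband/mlx5dv.h in rdma-core for the
 * authoritative definitions.
 *
 *	struct mlx5dv_flow_matcher_attr attr = {
 *		.type = IBV_FLOW_ATTR_NORMAL,
 *		.match_criteria_enable = 1 << 0,  // outer headers
 *		.match_mask = mask,  // struct mlx5dv_flow_match_parameters *
 *	};
 *	struct mlx5dv_flow_matcher *matcher =
 *		mlx5dv_create_flow_matcher(ctx, &attr);
 *	struct ibv_flow *flow =
 *		mlx5dv_create_flow(matcher, match_value,
 *				   num_actions, actions_attr);
 */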
2393
2394 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2395         MLX5_IB_METHOD_DESTROY_FLOW,
2396         UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
2397                         UVERBS_OBJECT_FLOW,
2398                         UVERBS_ACCESS_DESTROY,
2399                         UA_MANDATORY));
2400
2401 ADD_UVERBS_METHODS(mlx5_ib_fs,
2402                    UVERBS_OBJECT_FLOW,
2403                    &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
2404                    &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
2405
2406 DECLARE_UVERBS_NAMED_METHOD(
2407         MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
2408         UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
2409                         UVERBS_OBJECT_FLOW_ACTION,
2410                         UVERBS_ACCESS_NEW,
2411                         UA_MANDATORY),
2412         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
2413                            UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
2414                                    set_add_copy_action_in_auto)),
2415                            UA_MANDATORY,
2416                            UA_ALLOC_AND_COPY),
2417         UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
2418                              enum mlx5_ib_uapi_flow_table_type,
2419                              UA_MANDATORY));
2420
2421 DECLARE_UVERBS_NAMED_METHOD(
2422         MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
2423         UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
2424                         UVERBS_OBJECT_FLOW_ACTION,
2425                         UVERBS_ACCESS_NEW,
2426                         UA_MANDATORY),
2427         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
2428                            UVERBS_ATTR_MIN_SIZE(1),
2429                            UA_ALLOC_AND_COPY,
2430                            UA_OPTIONAL),
2431         UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
2432                              enum mlx5_ib_uapi_flow_action_packet_reformat_type,
2433                              UA_MANDATORY),
2434         UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
2435                              enum mlx5_ib_uapi_flow_table_type,
2436                              UA_MANDATORY));
2437
2438 ADD_UVERBS_METHODS(
2439         mlx5_ib_flow_actions,
2440         UVERBS_OBJECT_FLOW_ACTION,
2441         &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
2442         &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
2443
2444 DECLARE_UVERBS_NAMED_METHOD(
2445         MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
2446         UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
2447                         MLX5_IB_OBJECT_FLOW_MATCHER,
2448                         UVERBS_ACCESS_NEW,
2449                         UA_MANDATORY),
2450         UVERBS_ATTR_PTR_IN(
2451                 MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
2452                 UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
2453                 UA_MANDATORY),
2454         UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
2455                             mlx5_ib_flow_type,
2456                             UA_MANDATORY),
2457         UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
2458                            UVERBS_ATTR_TYPE(u8),
2459                            UA_MANDATORY),
2460         UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
2461                              enum ib_flow_flags,
2462                              UA_OPTIONAL),
2463         UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
2464                              enum mlx5_ib_uapi_flow_table_type,
2465                              UA_OPTIONAL));
2466
2467 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2468         MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
2469         UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
2470                         MLX5_IB_OBJECT_FLOW_MATCHER,
2471                         UVERBS_ACCESS_DESTROY,
2472                         UA_MANDATORY));
2473
2474 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
2475                             UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
2476                             &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
2477                             &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
2478
2479 const struct uapi_definition mlx5_ib_flow_defs[] = {
2480         UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2481                 MLX5_IB_OBJECT_FLOW_MATCHER),
2482         UAPI_DEF_CHAIN_OBJ_TREE(
2483                 UVERBS_OBJECT_FLOW,
2484                 &mlx5_ib_fs),
2485         UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
2486                                 &mlx5_ib_flow_actions),
2487         {},
2488 };
2489
2490 static const struct ib_device_ops flow_ops = {
2491         .create_flow = mlx5_ib_create_flow,
2492         .destroy_flow = mlx5_ib_destroy_flow,
2493         .destroy_flow_action = mlx5_ib_destroy_flow_action,
2494 };
2495
2496 static const struct ib_device_ops flow_ipsec_ops = {
2497         .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
2498         .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
2499 };
2500
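/*
 * Wire up the flow steering verbs at device registration time; the
 * ESP flow action ops are only exposed when the core device reports
 * IPsec offload capabilities.
 */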
2501 int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
2502 {
2503         dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
2504
2505         if (!dev->flow_db)
2506                 return -ENOMEM;
2507
2508         mutex_init(&dev->flow_db->lock);
2509
2510         ib_set_device_ops(&dev->ib_dev, &flow_ops);
2511         if (mlx5_accel_ipsec_device_caps(dev->mdev) &
2512             MLX5_ACCEL_IPSEC_CAP_DEVICE)
2513                 ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops);
2514
2515         return 0;
2516 }