/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
#include "eswitch.h"
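
/*
 * Stub command set: a mostly no-op implementation of struct mlx5_flow_cmds.
 * mlx5_fs_cmd_get_default() at the bottom of this file hands it out for
 * flow table types that are not programmed through firmware commands.
 */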
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn, bool disconnect)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   unsigned int log_size,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in, struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask, struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       int reformat_type, size_t size,
					       void *reformat_data,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}

static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}

static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}

static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}

static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}

static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect) {
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
		MLX5_SET(set_flow_table_root_in, in, table_id, 0);
	} else {
		MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
	}

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	if (ft->vport) {
		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      unsigned int log_size,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	if (ft->vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
	return err;
}

static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		if (ft->vport) {
			MLX5_SET(modify_flow_table_in, in, vport_number,
				 ft->vport);
			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
		}
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_group_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

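/*
 * Decide whether this FTE needs the extended destination format: it is
 * required when a forward action has more than one destination and at least
 * one destination carries a per-vport packet reformat. Fails with
 * -EOPNOTSUPP when the firmware cannot support that combination.
 */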
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

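/*
 * Build and execute a SET_FLOW_TABLE_ENTRY command: the command buffer holds
 * the flow context plus one destination entry per destination in
 * fte->dests_size. Forward destinations are written first, followed by the
 * flow counter list when MLX5_FLOW_CONTEXT_ACTION_COUNT is set.
 */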
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned int group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *fg,
			       int modify_mask, struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	int opmod;

	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
		MLX5_SET(delete_fte_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

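/*
 * Flow counter helpers: allocate and free hardware flow counters and query
 * their packet/byte statistics, one at a time or in bulk starting at base_id.
 * mlx5_cmd_fc_get_bulk_query_out_len() returns the output buffer size a
 * caller must provide to mlx5_cmd_fc_bulk_query().
 */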
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int err;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  int reformat_type,
					  size_t size,
					  void *reformat_data,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
		     GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	inlen = reformat - (void *)in + size;

	memset(in, 0, inlen);
	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, reformat_type);
	memcpy(reformat, reformat_data, size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
	struct mlx5_core_dev *dev = ns->dev;

	memset(in, 0, sizeof(in));
	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
	struct mlx5_core_dev *dev = ns->dev;

	memset(in, 0, sizeof(in));
	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};

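/*
 * Accessors for the two command back-ends. mlx5_fs_cmd_get_default() selects
 * per flow table type: types programmed through device firmware get
 * mlx5_flow_cmds, everything else gets the stubs.
 */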
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}