// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */

#include "rqt.h"
#include <linux/mlx5/transobj.h>

static bool verify_num_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
				unsigned int size)
{
	unsigned int max_num_vhca_id = MLX5_CAP_GEN_2(mdev, max_rqt_vhca_id);
	unsigned int i;

	/* Verify that all vhca_ids are in range [0, max_num_vhca_ids - 1] */
	for (i = 0; i < size; i++)
		if (vhca_ids[i] >= max_num_vhca_id)
			return false;

	return true;
}

static bool rqt_verify_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
				unsigned int size)
{
	/* Entries without explicit vhca_ids are always acceptable. */
	if (!vhca_ids)
		return true;

	if (!MLX5_CAP_GEN(mdev, cross_vhca_rqt))
		return false;
	if (!verify_num_vhca_ids(mdev, vhca_ids, size))
		return false;

	return true;
}

void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
					 unsigned int num_channels)
{
	unsigned int i;

	for (i = 0; i < indir->actual_table_size; i++)
		indir->table[i] = i % num_channels;
}

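/* Write the RQ list into an RQT context. When vhca_ids is non-NULL, the
 * extended rq_vhca entry format is used so that each RQ number is paired with
 * the vhca_id of the function that owns it; otherwise the plain rq_num format
 * is used.
 */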
static void fill_rqn_list(void *rqtc, u32 *rqns, u32 *vhca_ids, unsigned int size)
{
	unsigned int i;

	if (vhca_ids) {
		MLX5_SET(rqtc, rqtc, rq_vhca_id_format, 1);
		for (i = 0; i < size; i++) {
			MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_num, rqns[i]);
			MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_vhca_id, vhca_ids[i]);
		}
	} else {
		for (i = 0; i < size; i++)
			MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
	}
}

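/* Create an RQT with room for max_size entries and populate the first
 * init_size of them with the given RQ numbers (and optional vhca_ids).
 */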
static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
			  u16 max_size, u32 *init_rqns, u32 *init_vhca_ids, u16 init_size)
{
	int entry_sz, inlen, err;
	void *rqtc;
	u32 *in;

	if (!rqt_verify_vhca_ids(mdev, init_vhca_ids, init_size))
		return -EOPNOTSUPP;

	rqt->mdev = mdev;
	rqt->size = max_size;

	entry_sz = init_vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + entry_sz * init_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size);
	MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size);

	fill_rqn_list(rqtc, init_rqns, init_vhca_ids, init_size);

	err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn);

	kvfree(in);
	return err;
}

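/* A direct RQT steers to a single RQ, but when indirection may be enabled it
 * is still created with room for the full indirection table, so that it can
 * later be redirected to an indirect RQ list (mlx5e_rqt_redirect_indir()
 * checks rqt->size against the indirection table size).
 */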
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
			  bool indir_enabled, u32 init_rqn, u32 indir_table_size)
{
	u16 max_size = indir_enabled ? indir_table_size : 1;

	return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, NULL, 1);
}

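/* Reverse the size lowest bits of a. Used to permute indirection-table
 * indices when the XOR8 hash function is selected.
 */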
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}

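/* Expand the indirection table into a flat list of RQ numbers (and,
 * optionally, vhca_ids): entry i of the result is the RQ that hash bucket i
 * steers to.
 */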
static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, u32 *rss_vhca_ids, u32 *vhca_ids,
				 unsigned int num_rqns,
				 u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
	unsigned int i;

	for (i = 0; i < indir->actual_table_size; i++) {
		unsigned int ix = i;

		if (hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(ix, ilog2(indir->actual_table_size));

		ix = indir->table[ix];

		if (WARN_ON(ix >= num_rqns))
			/* Could be a bug in the driver or in the kernel part of
			 * ethtool: indir table refers to non-existent RQs.
			 */
			return -EINVAL;
		rss_rqns[i] = rqns[ix];
		if (vhca_ids)
			rss_vhca_ids[i] = vhca_ids[ix];
	}

	return 0;
}

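/* Create an RQT sized for the maximum indirection table and populate it
 * according to the indirection table, the hash function and the given RQs.
 */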
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
			 u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
			 u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
	u32 *rss_rqns, *rss_vhca_ids = NULL;
	int err;

	rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
	if (!rss_rqns)
		return -ENOMEM;

	if (vhca_ids) {
		rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
					      GFP_KERNEL);
		if (!rss_vhca_ids) {
			err = -ENOMEM;
			goto out;
		}
	}

	err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
	if (err)
		goto out;

	err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, rss_vhca_ids,
			     indir->actual_table_size);

out:
	kvfree(rss_vhca_ids);
	kvfree(rss_rqns);
	return err;
}

#define MLX5E_UNIFORM_SPREAD_RQT_FACTOR 2

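/* Number of RQT entries to use for a given channel count: the channel count
 * scaled by MLX5E_UNIFORM_SPREAD_RQT_FACTOR and rounded up to a power of two,
 * but at least MLX5E_INDIR_MIN_RQT_SIZE and at most the device maximum.
 */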
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
{
	u32 rqt_size = max_t(u32, MLX5E_INDIR_MIN_RQT_SIZE,
			     roundup_pow_of_two(num_channels * MLX5E_UNIFORM_SPREAD_RQT_FACTOR));
	u32 max_cap_rqt_size = 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size);

	return min_t(u32, rqt_size, max_cap_rqt_size);
}

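/* The XOR8 hash function can address at most 256 RQT entries, which caps the
 * number of channels that may be used together with it.
 */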
#define MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH 256

unsigned int mlx5e_rqt_max_num_channels_allowed_for_xor8(void)
{
	return MLX5E_MAX_RQT_SIZE_ALLOWED_WITH_XOR8_HASH / MLX5E_UNIFORM_SPREAD_RQT_FACTOR;
}

void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
{
	mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
}

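/* Replace the RQ list of an existing RQT with a MODIFY_RQT command; only the
 * rqn_list field is changed, as indicated by the bitmask.
 */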
static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
			      unsigned int size)
{
	int entry_sz, inlen, err;
	void *rqtc;
	u32 *in;

	if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, size))
		return -EINVAL;

	entry_sz = vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + entry_sz * size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	MLX5_SET(rqtc, rqtc, rqt_actual_size, size);

	fill_rqn_list(rqtc, rqns, vhca_ids, size);

	err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen);

	kvfree(in);
	return err;
}

int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id)
{
	return mlx5e_rqt_redirect(rqt, &rqn, vhca_id, 1);
}

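/* Repopulate an existing RQT from an indirection table. The RQT must have
 * been created with room for the full indirection table.
 */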
int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
			     unsigned int num_rqns,
			     u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
	u32 *rss_rqns, *rss_vhca_ids = NULL;
	int err;

	if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, num_rqns))
		return -EINVAL;

	if (WARN_ON(rqt->size != indir->max_table_size))
		return -EINVAL;

	rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
	if (!rss_rqns)
		return -ENOMEM;

	if (vhca_ids) {
		rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
					      GFP_KERNEL);
		if (!rss_vhca_ids) {
			err = -ENOMEM;
			goto out;
		}
	}

	err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
	if (err)
		goto out;

	err = mlx5e_rqt_redirect(rqt, rss_rqns, rss_vhca_ids, indir->actual_table_size);

out:
	kvfree(rss_vhca_ids);
	kvfree(rss_rqns);
	return err;
}