1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
4  */
5
6 #include <linux/gfp.h>
7 #include <linux/mlx5/qp.h>
8 #include <linux/mlx5/driver.h>
9 #include "mlx5_ib.h"
10 #include "qp.h"
11
12 static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
13                                struct mlx5_core_dct *dct);
14
/*
 * Look up a resource's common part by its resource serial number (rsn)
 * and take a reference on it.  Returns NULL if @rsn is not in the table.
 * The caller must drop the reference with mlx5_core_put_rsc().
 */
static struct mlx5_core_rsc_common *
mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
	struct mlx5_core_rsc_common *common;
	unsigned long flags;

	/* irqsave: also called from the EQ notifier path */
	spin_lock_irqsave(&table->lock, flags);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		/* Ref is taken under the lock so destroy can't race us */
		refcount_inc(&common->refcount);

	spin_unlock_irqrestore(&table->lock, flags);

	return common;
}
31
/*
 * Drop a reference taken by mlx5_get_rsc().  The final put completes
 * &common->free, unblocking destroy_resource_common().
 */
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (refcount_dec_and_test(&common->refcount))
		complete(&common->free);
}
37
38 static u64 qp_allowed_event_types(void)
39 {
40         u64 mask;
41
42         mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
43                BIT(MLX5_EVENT_TYPE_COMM_EST) |
44                BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
45                BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
46                BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
47                BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
48                BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
49                BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
50
51         return mask;
52 }
53
54 static u64 rq_allowed_event_types(void)
55 {
56         u64 mask;
57
58         mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
59                BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
60
61         return mask;
62 }
63
/* Bitmask of firmware event types that are valid for an SQ resource. */
static u64 sq_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}
68
/* Bitmask of firmware event types that are valid for a DCT resource. */
static u64 dct_allowed_event_types(void)
{
	return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
}
73
74 static bool is_event_type_allowed(int rsc_type, int event_type)
75 {
76         switch (rsc_type) {
77         case MLX5_EVENT_QUEUE_TYPE_QP:
78                 return BIT(event_type) & qp_allowed_event_types();
79         case MLX5_EVENT_QUEUE_TYPE_RQ:
80                 return BIT(event_type) & rq_allowed_event_types();
81         case MLX5_EVENT_QUEUE_TYPE_SQ:
82                 return BIT(event_type) & sq_allowed_event_types();
83         case MLX5_EVENT_QUEUE_TYPE_DCT:
84                 return BIT(event_type) & dct_allowed_event_types();
85         default:
86                 WARN(1, "Event arrived for unknown resource type");
87                 return false;
88         }
89 }
90
/*
 * Handle a DCT_DRAINED firmware event: wake the waiter blocked on
 * dct->drained in mlx5_core_destroy_dct().
 */
static int dct_event_notifier(struct mlx5_ib_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_core_dct *dct;
	unsigned long flags;
	u32 qpn;

	/* Low 24 bits of the EQE field carry the DCT number */
	qpn = be32_to_cpu(eqe->data.dct.dctn) & 0xFFFFFF;
	/* Hold the xarray lock so the DCT can't be removed under us */
	xa_lock_irqsave(&dev->qp_table.dct_xa, flags);
	dct = xa_load(&dev->qp_table.dct_xa, qpn);
	if (dct)
		complete(&dct->drained);
	xa_unlock_irqrestore(&dev->qp_table.dct_xa, flags);
	return NOTIFY_OK;
}
105
/*
 * EQ notifier callback for resource-affiliated events (QP/RQ/SQ/DCT).
 * Decodes the resource serial number from the EQE, takes a reference on
 * the resource and dispatches the event to its ->event() handler.
 */
static int rsc_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_ib_dev *dev =
		container_of(nb, struct mlx5_ib_dev, qp_table.nb);
	struct mlx5_core_rsc_common *common;
	struct mlx5_eqe *eqe = data;
	u8 event_type = (u8)type;
	struct mlx5_core_qp *qp;
	u32 rsn;

	switch (event_type) {
	case MLX5_EVENT_TYPE_DCT_DRAINED:
		/* DCTs live in their own xarray, not the radix tree */
		return dct_event_notifier(dev, eqe);
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		/* rsn = 24-bit resource number | resource type in the high bits */
		rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
		break;
	default:
		return NOTIFY_DONE;
	}

	common = mlx5_get_rsc(&dev->qp_table, rsn);
	if (!common)
		/* Resource already gone; nothing to deliver to */
		return NOTIFY_OK;

	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
		goto out;

	switch (common->res) {
	case MLX5_RES_QP:
	case MLX5_RES_RQ:
	case MLX5_RES_SQ:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		/* Need to put resource in event handler */
		return NOTIFY_OK;
	default:
		break;
	}
out:
	mlx5_core_put_rsc(common);

	return NOTIFY_OK;
}
158
/*
 * Publish @qp in the radix-tree lookup table keyed by qpn combined with
 * the resource type, and initialize the refcount/free completion used by
 * destroy_resource_common().
 *
 * NOTE(review): the refcount and completion are initialized *after* the
 * entry is visible in the tree; presumably no event can target this
 * resource before creation finishes — confirm against the notifier path.
 */
static int create_resource_common(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *qp, int rsc_type)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	int err;

	qp->common.res = rsc_type;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree,
				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
				qp);
	spin_unlock_irq(&table->lock);
	if (err)
		return err;

	refcount_set(&qp->common.refcount, 1);
	init_completion(&qp->common.free);
	qp->pid = current->pid;

	return 0;
}
180
/*
 * Remove @qp from the lookup table, drop the creation reference, and
 * wait until all concurrent event handlers have released theirs before
 * returning — after this, the resource memory is safe to free.
 */
static void destroy_resource_common(struct mlx5_ib_dev *dev,
				    struct mlx5_core_qp *qp)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree,
			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
	spin_unlock_irqrestore(&table->lock, flags);
	/* Drop the reference taken in create_resource_common() ... */
	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	/* ... and wait for the last holder (e.g. an event handler) */
	wait_for_completion(&qp->common.free);
}
194
/* Issue the DESTROY_DCT firmware command for @dct (no table bookkeeping). */
static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
				  struct mlx5_core_dct *dct)
{
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
}
206
/*
 * Create a DCT in firmware and register it in the DCT xarray so
 * DCT_DRAINED events can find it.  On xarray failure the firmware
 * object is destroyed again before returning the error.
 */
int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
			 u32 *in, int inlen, u32 *out, int outlen)
{
	struct mlx5_core_qp *qp = &dct->mqp;
	int err;

	/* Completed by dct_event_notifier() when the DCT is drained */
	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);
	if (err)
		return err;

	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
	qp->uid = MLX5_GET(create_dct_in, in, uid);
	err = xa_err(xa_store_irq(&dev->qp_table.dct_xa, qp->qpn, dct, GFP_KERNEL));
	if (err)
		goto err_cmd;

	return 0;
err_cmd:
	_mlx5_core_destroy_dct(dev, dct);
	return err;
}
231
/*
 * Create a QP in firmware, register it in the resource table and expose
 * it in debugfs (non-SMI devices only).  On registration failure the
 * firmware QP is destroyed before the error is returned.
 */
int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *in, int inlen, u32 *out)
{
	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
	int err;

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
			    MLX5_ST_SZ_BYTES(create_qp_out));
	if (err)
		return err;

	qp->uid = MLX5_GET(create_qp_in, in, uid);
	qp->qpn = MLX5_GET(create_qp_out, out, qpn);

	err = create_resource_common(dev, qp, MLX5_RES_QP);
	if (err)
		goto err_cmd;

	if (dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI)
		mlx5_debug_qp_add(dev->mdev, qp);

	return 0;

err_cmd:
	/* Unwind: destroy the firmware object we just created */
	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
	return err;
}
264
/* Ask firmware to drain @dct; completion is signalled via a DCT_DRAINED event. */
static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
			       struct mlx5_core_dct *dct)
{
	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
	struct mlx5_core_qp *qp = &dct->mqp;

	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
	MLX5_SET(drain_dct_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
}
276
/*
 * Tear down a DCT: drain it, wait for the DCT_DRAINED event (delivered
 * through dct_event_notifier()), then destroy the firmware object and
 * remove the xarray entry.
 *
 * The entry is first swapped to XA_ZERO_ENTRY instead of erased so the
 * index stays reserved and can be restored if the destroy command fails.
 */
int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
			  struct mlx5_core_dct *dct)
{
	struct mlx5_qp_table *table = &dev->qp_table;
	struct mlx5_core_dct *tmp;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		/* On internal error the drained event will never arrive;
		 * skip the wait and destroy directly.
		 */
		if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
			goto destroy;

		return err;
	}
	wait_for_completion(&dct->drained);

destroy:
	/* Hide the DCT from the event notifier while keeping the slot */
	tmp = xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, dct, XA_ZERO_ENTRY, GFP_KERNEL);
	if (WARN_ON(tmp != dct))
		return xa_err(tmp) ?: -EINVAL;

	err = _mlx5_core_destroy_dct(dev, dct);
	if (err) {
		/* Destroy failed: make the DCT visible again */
		xa_cmpxchg_irq(&table->dct_xa, dct->mqp.qpn, XA_ZERO_ENTRY, dct, 0);
		return err;
	}
	xa_erase_irq(&table->dct_xa, dct->mqp.qpn);
	return 0;
}
306
/*
 * Destroy a QP: unregister from debugfs and the resource table (waiting
 * out any in-flight event handlers), then destroy the firmware object.
 */
int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
{
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	if (dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI)
		mlx5_debug_qp_remove(dev->mdev, qp);

	destroy_resource_common(dev, qp);

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
}
321
/*
 * Program the device delay-drop timeout.  @timeout_usec is divided by
 * 100 before being written, i.e. the firmware field is presumably in
 * 100-usec units — confirm against the PRM if precision matters.
 */
int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
			     u32 timeout_usec)
{
	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};

	MLX5_SET(set_delay_drop_params_in, in, opcode,
		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
		 timeout_usec / 100);
	return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
}
333
/* Command mailbox: heap-allocated input/output buffers and their sizes. */
struct mbox_info {
	u32 *in;	/* command input buffer (kzalloc'ed) */
	u32 *out;	/* command output buffer (kzalloc'ed) */
	int inlen;
	int outlen;
};
340
341 static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
342 {
343         mbox->inlen  = inlen;
344         mbox->outlen = outlen;
345         mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
346         mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
347         if (!mbox->in || !mbox->out) {
348                 kfree(mbox->in);
349                 kfree(mbox->out);
350                 return -ENOMEM;
351         }
352
353         return 0;
354 }
355
/* Release both mailbox buffers (safe on NULL pointers). */
static void mbox_free(struct mbox_info *mbox)
{
	kfree(mbox->in);
	kfree(mbox->out);
}
361
/*
 * Extract the ECE (enhanced connection establishment) value from the
 * command output for the transitions that support it; returns 0 for
 * opcodes with no ECE field.
 */
static int get_ece_from_mbox(void *out, u16 opcode)
{
	int ece = 0;

	switch (opcode) {
	case MLX5_CMD_OP_INIT2INIT_QP:
		ece = MLX5_GET(init2init_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		ece = MLX5_GET(init2rtr_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		ece = MLX5_GET(rtr2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		ece = MLX5_GET(rts2rts_qp_out, out, ece);
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		ece = MLX5_GET(rst2init_qp_out, out, ece);
		break;
	default:
		break;
	}

	return ece;
}
388
/*
 * Allocate and fill the command mailbox for a modify-QP transition.
 *
 * Each opcode uses a differently-sized input/output layout, so the
 * mailbox is sized per opcode.  Transitions that carry a QP context also
 * copy in @qpc and @opt_param_mask; opcodes supporting ECE negotiation
 * additionally program @ece.
 *
 * Returns 0 on success (caller owns @mbox and must mbox_free() it after
 * executing the command), -ENOMEM on allocation failure, or -EINVAL for
 * an unknown opcode.
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox, u16 uid, u32 ece)
{
	mbox->out = NULL;
	mbox->in = NULL;

/* Size the mailbox from the opcode's *_in / *_out layout structs */
#define MBOX_ALLOC(mbox, typ)  \
	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

/* Common header fields every modify command needs */
#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid)                            \
	do {                                                                   \
		MLX5_SET(typ##_in, in, opcode, _opcode);                       \
		MLX5_SET(typ##_in, in, qpn, _qpn);                             \
		MLX5_SET(typ##_in, in, uid, _uid);                             \
	} while (0)

/* Header fields plus the QP context and optional-parameter mask */
#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid)          \
	do {                                                                   \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid);                   \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p);                \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc,                  \
		       MLX5_ST_SZ_BYTES(qpc));                                 \
	} while (0)

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_SQD_RTS_QP:
		if (MBOX_ALLOC(mbox, sqd2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqd2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc, uid);
		MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
480
/*
 * Execute a QP state-transition command.  @ece, when non-NULL, is both
 * input (requested ECE options) and output (the value firmware returned,
 * 0 for opcodes without an ECE field).
 */
int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
			void *qpc, struct mlx5_core_qp *qp, u32 *ece)
{
	struct mbox_info mbox;
	int err;

	err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
				   qpc, &mbox, qp->uid, (ece) ? *ece : 0);
	if (err)
		return err;

	err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
			    mbox.outlen);

	if (ece)
		*ece = get_ece_from_mbox(mbox.out, opcode);

	mbox_free(&mbox);
	return err;
}
501
/*
 * Initialize the per-device QP table: lookup structures, debugfs (for
 * non-SMI devices) and the EQ notifier that routes resource events.
 */
int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	xa_init(&table->dct_xa);

	if (dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI)
		mlx5_qp_debugfs_init(dev->mdev);

	table->nb.notifier_call = rsc_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}
518
/* Undo mlx5_init_qp_table(): unregister the notifier and tear down debugfs. */
void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_qp_table *table = &dev->qp_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
	if (dev->ib_dev.type != RDMA_DEVICE_TYPE_SMI)
		mlx5_qp_debugfs_cleanup(dev->mdev);
}
527
/*
 * Query a QP's context from firmware into @out.  @qpc_ext selects the
 * extended QP context layout in the response.
 */
int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen, bool qpc_ext)
{
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	MLX5_SET(query_qp_in, in, qpc_ext, qpc_ext);

	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}
539
540 int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
541                         u32 *out, int outlen)
542 {
543         u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
544         struct mlx5_core_qp *qp = &dct->mqp;
545
546         MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
547         MLX5_SET(query_dct_in, in, dctn, qp->qpn);
548
549         return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
550                              outlen);
551 }
552
553 int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
554 {
555         u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
556         u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
557         int err;
558
559         MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
560         err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
561         if (!err)
562                 *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
563         return err;
564 }
565
/* Release an XRC domain previously allocated by mlx5_core_xrcd_alloc(). */
int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};

	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
	return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
}
574
/* Issue the DESTROY_RQ firmware command (no table bookkeeping). */
static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};

	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
	MLX5_SET(destroy_rq_in, in, rqn, rqn);
	MLX5_SET(destroy_rq_in, in, uid, uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
}
584
/*
 * Create an RQ in firmware and register it in the resource table so it
 * receives affiliated events.  On registration failure the firmware RQ
 * is destroyed again before the error is returned.
 */
int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq)
{
	int err;
	u32 rqn;

	err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
	if (err)
		return err;

	rq->uid = MLX5_GET(create_rq_in, in, uid);
	rq->qpn = rqn;
	err = create_resource_common(dev, rq, MLX5_RES_RQ);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	destroy_rq_tracked(dev, rq->qpn, rq->uid);

	return err;
}
608
/* Unregister a tracked RQ (waits out event handlers) and destroy it in FW. */
int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
				 struct mlx5_core_qp *rq)
{
	destroy_resource_common(dev, rq);
	return destroy_rq_tracked(dev, rq->qpn, rq->uid);
}
615
/* Issue the DESTROY_SQ firmware command; the command status is ignored. */
static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};

	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
	MLX5_SET(destroy_sq_in, in, sqn, sqn);
	MLX5_SET(destroy_sq_in, in, uid, uid);
	mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
}
625
/*
 * Create an SQ in firmware and register it in the resource table so it
 * receives affiliated events.  On registration failure the firmware SQ
 * is destroyed again before the error is returned.
 */
int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq)
{
	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
	int err;

	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	sq->qpn = MLX5_GET(create_sq_out, out, sqn);
	sq->uid = MLX5_GET(create_sq_in, in, uid);
	err = create_resource_common(dev, sq, MLX5_RES_SQ);
	if (err)
		goto err_destroy_sq;

	return 0;

err_destroy_sq:
	destroy_sq_tracked(dev, sq->qpn, sq->uid);

	return err;
}
650
/* Unregister a tracked SQ (waits out event handlers) and destroy it in FW. */
void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
				  struct mlx5_core_qp *sq)
{
	destroy_resource_common(dev, sq);
	destroy_sq_tracked(dev, sq->qpn, sq->uid);
}
657
658 struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
659                                                 int res_num,
660                                                 enum mlx5_res_type res_type)
661 {
662         u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
663         struct mlx5_qp_table *table = &dev->qp_table;
664
665         return mlx5_get_rsc(table, rsn);
666 }
667
/* Release a reference taken by mlx5_core_res_hold(). */
void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
{
	mlx5_core_put_rsc(res);
}