[linux.git] / drivers / net / ethernet / mellanox / mlx5 / core / debugfs.c (Linux 6.14-rc3)
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"

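/*
 * Each resource type (QP, EQ, CQ) exposes one debugfs file per entry of the
 * matching *_fields[] table below; the enum values are the indices that
 * dbg_read() uses to pick which field of the queried context to print.
 */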
enum {
        QP_PID,
        QP_STATE,
        QP_XPORT,
        QP_MTU,
        QP_N_RECV,
        QP_RECV_SZ,
        QP_N_SEND,
        QP_LOG_PG_SZ,
        QP_RQPN,
};

static char *qp_fields[] = {
        [QP_PID]        = "pid",
        [QP_STATE]      = "state",
        [QP_XPORT]      = "transport",
        [QP_MTU]        = "mtu",
        [QP_N_RECV]     = "num_recv",
        [QP_RECV_SZ]    = "rcv_wqe_sz",
        [QP_N_SEND]     = "num_send",
        [QP_LOG_PG_SZ]  = "log2_page_sz",
        [QP_RQPN]       = "remote_qpn",
};

enum {
        EQ_NUM_EQES,
        EQ_INTR,
        EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
        [EQ_NUM_EQES]   = "num_eqes",
        [EQ_INTR]       = "intr",
        [EQ_LOG_PG_SZ]  = "log_page_size",
};

enum {
        CQ_PID,
        CQ_NUM_CQES,
        CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
        [CQ_PID]        = "pid",
        [CQ_NUM_CQES]   = "num_cqes",
        [CQ_LOG_PG_SZ]  = "log_page_size",
};

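/* Module-wide debugfs root ("mlx5"), created and removed by the register/unregister helpers below. */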
struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

void mlx5_register_debugfs(void)
{
        mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}

void mlx5_unregister_debugfs(void)
{
        debugfs_remove(mlx5_debugfs_root);
}

struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
{
        return dev->priv.dbg.dbg_root;
}
EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
        dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);

void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
        dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}

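/*
 * Command-interface statistics files. Each per-opcode directory under the
 * device's "commands" debugfs directory gets an "average" file (the running
 * average stats->sum / stats->n kept by the command interface) and a
 * write-only "reset" file that clears all counters, backed by the fops below.
 */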
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
                            loff_t *pos)
{
        struct mlx5_cmd_stats *stats;
        u64 field = 0;
        int ret;
        char tbuf[22];

        stats = filp->private_data;
        spin_lock_irq(&stats->lock);
        if (stats->n)
                field = div64_u64(stats->sum, stats->n);
        spin_unlock_irq(&stats->lock);
        ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t reset_write(struct file *filp, const char __user *buf,
                           size_t count, loff_t *pos)
{
        struct mlx5_cmd_stats *stats;

        stats = filp->private_data;
        spin_lock_irq(&stats->lock);
        stats->sum = 0;
        stats->n = 0;
        stats->failed = 0;
        stats->failed_mbox_status = 0;
        stats->last_failed_errno = 0;
        stats->last_failed_mbox_status = 0;
        stats->last_failed_syndrome = 0;
        spin_unlock_irq(&stats->lock);

        *pos += count;

        return count;
}

static const struct file_operations reset_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = reset_write,
};

static const struct file_operations average_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .read   = average_read,
};

static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
                          loff_t *pos)
{
        struct mlx5_cmd *cmd;
        char tbuf[6];
        int weight;
        int field;
        int ret;

        cmd = filp->private_data;
        weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
        field = cmd->vars.max_reg_cmds - weight;
        ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations slots_fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .read   = slots_read,
};

static struct mlx5_cmd_stats *
mlx5_cmdif_alloc_stats(struct xarray *stats_xa, int opcode)
{
        struct mlx5_cmd_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL);
        int err;

        if (!stats)
                return NULL;

        err = xa_insert(stats_xa, opcode, stats, GFP_KERNEL);
        if (err) {
                kfree(stats);
                return NULL;
        }
        spin_lock_init(&stats->lock);
        return stats;
}

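/*
 * Create the "commands" directory with a global "slots_inuse" counter and one
 * subdirectory per known command opcode, each exposing the per-opcode
 * statistics (count, failures, last error details) plus the reset/average
 * files above.
 */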
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_stats *stats;
        struct dentry **cmd;
        const char *namep;
        int i;

        cmd = &dev->priv.dbg.cmdif_debugfs;
        *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);

        debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);

        xa_init(&dev->cmd.stats);

        for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
                namep = mlx5_command_str(i);
                if (strcmp(namep, "unknown command opcode")) {
                        stats = mlx5_cmdif_alloc_stats(&dev->cmd.stats, i);
                        if (!stats)
                                continue;
                        stats->root = debugfs_create_dir(namep, *cmd);

                        debugfs_create_file("reset", 0200, stats->root, stats,
                                            &reset_fops);
                        debugfs_create_file("average", 0400, stats->root, stats,
                                            &average_fops);
                        debugfs_create_u64("n", 0400, stats->root, &stats->n);
                        debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
                        debugfs_create_u64("failed_mbox_status", 0400, stats->root,
                                           &stats->failed_mbox_status);
                        debugfs_create_u32("last_failed_errno", 0400, stats->root,
                                           &stats->last_failed_errno);
                        debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
                                          &stats->last_failed_mbox_status);
                        debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
                                           &stats->last_failed_syndrome);
                }
        }
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_stats *stats;
        unsigned long i;

        debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
        xa_for_each(&dev->cmd.stats, i, stats)
                kfree(stats);
        xa_destroy(&dev->cmd.stats);
}

void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
        dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
}

void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
{
        struct dentry *pages;

        dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
        pages = dev->priv.dbg.pages_debugfs;

        debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
        debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
        debugfs_create_u32("fw_pages_ec_vfs", 0400, pages, &dev->priv.page_counters[MLX5_EC_VF]);
        debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
        debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
        debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
        debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
        debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
                           &dev->priv.reclaim_pages_discard);
}

void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
{
        debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}

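/*
 * The *_read_field() helpers below query the firmware context of a single
 * resource (QUERY_QP/EQ/CQ) and extract one field, selected by the enum
 * index, on behalf of dbg_read(). String-valued QP fields are returned as a
 * pointer cast to u64, with *is_str set.
 */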
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                         int index, int *is_str)
{
        int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
        u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
        u64 param = 0;
        u32 *out;
        int state;
        u32 *qpc;
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return 0;

        MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
        MLX5_SET(query_qp_in, in, qpn, qp->qpn);
        err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
        if (err)
                goto out;

        *is_str = 0;

        qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
        switch (index) {
        case QP_PID:
                param = qp->pid;
                break;
        case QP_STATE:
                state = MLX5_GET(qpc, qpc, state);
                param = (unsigned long)mlx5_qp_state_str(state);
                *is_str = 1;
                break;
        case QP_XPORT:
                param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
                *is_str = 1;
                break;
        case QP_MTU:
                switch (MLX5_GET(qpc, qpc, mtu)) {
                case IB_MTU_256:
                        param = 256;
                        break;
                case IB_MTU_512:
                        param = 512;
                        break;
                case IB_MTU_1024:
                        param = 1024;
                        break;
                case IB_MTU_2048:
                        param = 2048;
                        break;
                case IB_MTU_4096:
                        param = 4096;
                        break;
                default:
                        param = 0;
                }
                break;
        case QP_N_RECV:
                param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
                break;
        case QP_RECV_SZ:
                param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
                break;
        case QP_N_SEND:
                if (!MLX5_GET(qpc, qpc, no_sq))
                        param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
                break;
        case QP_LOG_PG_SZ:
                param = MLX5_GET(qpc, qpc, log_page_size) + 12;
                break;
        case QP_RQPN:
                param = MLX5_GET(qpc, qpc, remote_qpn);
                break;
        }
out:
        kfree(out);
        return param;
}

static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                         int index)
{
        int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
        u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
        u64 param = 0;
        void *ctx;
        u32 *out;
        int err;

        out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
        err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
        if (err) {
                mlx5_core_warn(dev, "failed to query eq\n");
                goto out;
        }
        ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

        switch (index) {
        case EQ_NUM_EQES:
                param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
                break;
        case EQ_INTR:
                param = MLX5_GET(eqc, ctx, intr);
                break;
        case EQ_LOG_PG_SZ:
                param = MLX5_GET(eqc, ctx, log_page_size) + 12;
                break;
        }

out:
        kfree(out);
        return param;
}

static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                         int index)
{
        int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
        u64 param = 0;
        void *ctx;
        u32 *out;
        int err;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return param;

        err = mlx5_core_query_cq(dev, cq, out);
        if (err) {
                mlx5_core_warn(dev, "failed to query cq\n");
                goto out;
        }
        ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

        switch (index) {
        case CQ_PID:
                param = cq->pid;
                break;
        case CQ_NUM_CQES:
                param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
                break;
        case CQ_LOG_PG_SZ:
                param = MLX5_GET(cqc, ctx, log_page_size);
                break;
        }

out:
        kvfree(out);
        return param;
}

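/*
 * filp->private_data points at d->fields[desc->i] inside a struct
 * mlx5_rsc_debug, whose fields[] array is a flexible array member at the end
 * of the struct; walking back desc->i entries and then sizeof(*d) bytes
 * recovers the parent structure and, with it, the device, object and type.
 */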
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
                        loff_t *pos)
{
        struct mlx5_field_desc *desc;
        struct mlx5_rsc_debug *d;
        char tbuf[18];
        int is_str = 0;
        u64 field;
        int ret;

        desc = filp->private_data;
        d = (void *)(desc - desc->i) - sizeof(*d);
        switch (d->type) {
        case MLX5_DBG_RSC_QP:
                field = qp_read_field(d->dev, d->object, desc->i, &is_str);
                break;

        case MLX5_DBG_RSC_EQ:
                field = eq_read_field(d->dev, d->object, desc->i);
                break;

        case MLX5_DBG_RSC_CQ:
                field = cq_read_field(d->dev, d->object, desc->i);
                break;

        default:
                mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
                return -EINVAL;
        }

        if (is_str)
                ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
        else
                ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

        return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .read   = dbg_read,
};

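/*
 * Allocate a per-resource mlx5_rsc_debug (with one field descriptor per entry
 * of field[]), create a debugfs directory named after the resource number and
 * populate it with one read-only file per field.
 */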
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
                        struct dentry *root, struct mlx5_rsc_debug **dbg,
                        int rsn, char **field, int nfile, void *data)
{
        struct mlx5_rsc_debug *d;
        char resn[32];
        int i;

        d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->dev = dev;
        d->object = data;
        d->type = type;
        sprintf(resn, "0x%x", rsn);
        d->root = debugfs_create_dir(resn, root);

        for (i = 0; i < nfile; i++) {
                d->fields[i].i = i;
                debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
                                    &fops);
        }
        *dbg = d;

        return 0;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
        debugfs_remove_recursive(d->root);
        kfree(d);
}

int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
                           &qp->dbg, qp->qpn, qp_fields,
                           ARRAY_SIZE(qp_fields), qp);
        if (err)
                qp->dbg = NULL;

        return err;
}
EXPORT_SYMBOL(mlx5_debug_qp_add);

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
        if (!mlx5_debugfs_root || !qp->dbg)
                return;

        rem_res_tree(qp->dbg);
        qp->dbg = NULL;
}
EXPORT_SYMBOL(mlx5_debug_qp_remove);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
                           &eq->dbg, eq->eqn, eq_fields,
                           ARRAY_SIZE(eq_fields), eq);
        if (err)
                eq->dbg = NULL;

        return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        if (!mlx5_debugfs_root)
                return;

        if (eq->dbg)
                rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        int err;

        if (!mlx5_debugfs_root)
                return 0;

        err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
                           &cq->dbg, cq->cqn, cq_fields,
                           ARRAY_SIZE(cq_fields), cq);
        if (err)
                cq->dbg = NULL;

        return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        if (!mlx5_debugfs_root)
                return;

        if (cq->dbg) {
                rem_res_tree(cq->dbg);
                cq->dbg = NULL;
        }
}