2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/debugfs.h>
34 #include <linux/mlx5/qp.h>
35 #include <linux/mlx5/cq.h>
36 #include <linux/mlx5/driver.h>
37 #include "mlx5_core.h"
52 static char *qp_fields[] = {
55 [QP_XPORT] = "transport",
57 [QP_N_RECV] = "num_recv",
58 [QP_RECV_SZ] = "rcv_wqe_sz",
59 [QP_N_SEND] = "num_send",
60 [QP_LOG_PG_SZ] = "log2_page_sz",
61 [QP_RQPN] = "remote_qpn",
70 static char *eq_fields[] = {
71 [EQ_NUM_EQES] = "num_eqes",
73 [EQ_LOG_PG_SZ] = "log_page_size",
82 static char *cq_fields[] = {
84 [CQ_NUM_CQES] = "num_cqes",
85 [CQ_LOG_PG_SZ] = "log_page_size",
/* Top-level "mlx5" debugfs directory; shared by all mlx5 devices. */
struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);
91 void mlx5_register_debugfs(void)
93 mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
96 void mlx5_unregister_debugfs(void)
98 debugfs_remove(mlx5_debugfs_root);
101 struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
103 return dev->priv.dbg.dbg_root;
105 EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);
107 void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
109 dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
111 EXPORT_SYMBOL(mlx5_qp_debugfs_init);
113 void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
115 debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
117 EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
119 void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
121 dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
124 void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
126 debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
129 static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
132 struct mlx5_cmd_stats *stats;
137 stats = filp->private_data;
138 spin_lock_irq(&stats->lock);
140 field = div64_u64(stats->sum, stats->n);
141 spin_unlock_irq(&stats->lock);
142 ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
143 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
146 static ssize_t reset_write(struct file *filp, const char __user *buf,
147 size_t count, loff_t *pos)
149 struct mlx5_cmd_stats *stats;
151 stats = filp->private_data;
152 spin_lock_irq(&stats->lock);
156 stats->failed_mbox_status = 0;
157 stats->last_failed_errno = 0;
158 stats->last_failed_mbox_status = 0;
159 stats->last_failed_syndrome = 0;
160 spin_unlock_irq(&stats->lock);
167 static const struct file_operations reset_fops = {
168 .owner = THIS_MODULE,
170 .write = reset_write,
173 static const struct file_operations average_fops = {
174 .owner = THIS_MODULE,
176 .read = average_read,
179 static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
182 struct mlx5_cmd *cmd;
188 cmd = filp->private_data;
189 weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
190 field = cmd->vars.max_reg_cmds - weight;
191 ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
192 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
195 static const struct file_operations slots_fops = {
196 .owner = THIS_MODULE,
201 static struct mlx5_cmd_stats *
202 mlx5_cmdif_alloc_stats(struct xarray *stats_xa, int opcode)
204 struct mlx5_cmd_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL);
210 err = xa_insert(stats_xa, opcode, stats, GFP_KERNEL);
215 spin_lock_init(&stats->lock);
219 void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
221 struct mlx5_cmd_stats *stats;
226 cmd = &dev->priv.dbg.cmdif_debugfs;
227 *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
229 debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
231 xa_init(&dev->cmd.stats);
233 for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
234 namep = mlx5_command_str(i);
235 if (strcmp(namep, "unknown command opcode")) {
236 stats = mlx5_cmdif_alloc_stats(&dev->cmd.stats, i);
239 stats->root = debugfs_create_dir(namep, *cmd);
241 debugfs_create_file("reset", 0200, stats->root, stats,
243 debugfs_create_file("average", 0400, stats->root, stats,
245 debugfs_create_u64("n", 0400, stats->root, &stats->n);
246 debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
247 debugfs_create_u64("failed_mbox_status", 0400, stats->root,
248 &stats->failed_mbox_status);
249 debugfs_create_u32("last_failed_errno", 0400, stats->root,
250 &stats->last_failed_errno);
251 debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
252 &stats->last_failed_mbox_status);
253 debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
254 &stats->last_failed_syndrome);
259 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
261 struct mlx5_cmd_stats *stats;
264 debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
265 xa_for_each(&dev->cmd.stats, i, stats)
267 xa_destroy(&dev->cmd.stats);
270 void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
272 dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
275 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
277 debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
280 void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
282 struct dentry *pages;
284 dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
285 pages = dev->priv.dbg.pages_debugfs;
287 debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
288 debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
289 debugfs_create_u32("fw_pages_ec_vfs", 0400, pages, &dev->priv.page_counters[MLX5_EC_VF]);
290 debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
291 debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
292 debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
293 debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
294 debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
295 &dev->priv.reclaim_pages_discard);
298 void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
300 debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
/*
 * qp_read_field() - issue a QUERY_QP firmware command for @qp and pull out
 * the single field selected by @index (a QP_* field enum value).
 * *is_str is set when the returned value is actually a pointer to a
 * constant string (state/transport names) rather than a number.
 *
 * NOTE(review): this extract is missing several lines of the function
 * (allocation-failure/error paths, the switch framing, the QP_MTU value
 * mapping, and the kfree of the output buffer) — reconstruct them from the
 * upstream file before relying on this listing.
 */
303 static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
304 			 int index, int *is_str)
306 	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
307 	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
/* Output mailbox is heap-allocated because the QUERY_QP output is large. */
314 	out = kzalloc(outlen, GFP_KERNEL);
318 	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
319 	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
320 	err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
/* Locate the QP context inside the firmware response. */
326 	qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
/* QP_STATE: return a pointer to the state name string (is_str case). */
332 		state = MLX5_GET(qpc, qpc, state);
333 		param = (unsigned long)mlx5_qp_state_str(state);
/* QP_XPORT: transport/service type name string (is_str case). */
337 		param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
/* QP_MTU: map the encoded MTU to bytes (cases elided in this extract). */
341 		switch (MLX5_GET(qpc, qpc, mtu)) {
/* QP_N_RECV: receive queue depth is 2^log_rq_size. */
362 		param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
/* QP_RECV_SZ: RQ WQE stride is 2^(log_rq_stride + 4) bytes. */
365 		param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
/* QP_N_SEND: send queue depth, but only when the QP has an SQ. */
368 		if (!MLX5_GET(qpc, qpc, no_sq))
369 		param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
/* QP_LOG_PG_SZ: firmware encodes page size relative to 4K (2^12). */
372 		param = MLX5_GET(qpc, qpc, log_page_size) + 12;
/* QP_RQPN: remote QP number of the connected peer. */
375 		param = MLX5_GET(qpc, qpc, remote_qpn);
/*
 * eq_read_field() - issue a QUERY_EQ firmware command for @eq and pull out
 * the single field selected by @index (an EQ_* field enum value).
 *
 * NOTE(review): this extract is missing lines of the function (local
 * declarations, allocation-failure/error handling, the switch framing and
 * the final kfree) — reconstruct from the upstream file before relying on
 * this listing.
 */
383 static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
386 	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
387 	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
/* Heap-allocate the output mailbox for the QUERY_EQ response. */
393 	out = kzalloc(outlen, GFP_KERNEL);
397 	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
398 	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
399 	err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
/* Query failure is logged; the field then reads back as 0. */
401 		mlx5_core_warn(dev, "failed to query eq\n");
404 	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
/* EQ_NUM_EQES: EQ depth is 2^log_eq_size entries. */
408 		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
/* EQ_INTR: interrupt vector number assigned to this EQ. */
411 		param = MLX5_GET(eqc, ctx, intr);
/* EQ_LOG_PG_SZ: firmware encodes page size relative to 4K (2^12). */
414 		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
/*
 * cq_read_field() - query the CQ context from firmware and pull out the
 * single field selected by @index (a CQ_* field enum value).
 *
 * NOTE(review): this extract is missing lines of the function (local
 * declarations, allocation-failure/error handling, the switch framing and
 * the final kvfree) — reconstruct from the upstream file before relying on
 * this listing.
 */
423 static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
426 	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
/* kvzalloc: the QUERY_CQ output may be large; virtual alloc is fine here. */
432 	out = kvzalloc(outlen, GFP_KERNEL);
/* Dedicated helper wraps the QUERY_CQ firmware command. */
436 	err = mlx5_core_query_cq(dev, cq, out);
/* Query failure is logged; the field then reads back as 0. */
438 		mlx5_core_warn(dev, "failed to query cq\n");
441 	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
/* CQ_NUM_CQES: CQ depth is 2^log_cq_size entries. */
448 		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
/* CQ_LOG_PG_SZ: raw log page size field (no +12 adjustment here). */
451 		param = MLX5_GET(cqc, ctx, log_page_size);
/*
 * dbg_read() - common read handler for every per-resource field file.
 * The file's private_data points at one mlx5_field_desc element inside the
 * flexible "fields" array at the tail of a mlx5_rsc_debug; the containing
 * mlx5_rsc_debug is recovered by pointer arithmetic (see below), and the
 * resource type then dispatches to the matching *_read_field() helper.
 *
 * NOTE(review): this extract is missing lines of the function (local
 * declarations, the switch statement opener, and error-path framing) —
 * reconstruct from the upstream file before relying on this listing.
 */
460 static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
463 	struct mlx5_field_desc *desc;
464 	struct mlx5_rsc_debug *d;
470 	desc = filp->private_data;
/* desc->i is this descriptor's index into d->fields[]; stepping back by
 * that many elements and then by sizeof(*d) recovers the container.
 */
471 	d = (void *)(desc - desc->i) - sizeof(*d);
473 	case MLX5_DBG_RSC_QP:
/* QP fields may return a string pointer; is_str distinguishes below. */
474 		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
477 	case MLX5_DBG_RSC_EQ:
478 		field = eq_read_field(d->dev, d->object, desc->i);
481 	case MLX5_DBG_RSC_CQ:
482 		field = cq_read_field(d->dev, d->object, desc->i);
/* Unknown resource type: warn and fall through to the error path. */
486 		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
/* String-valued fields carry a pointer in "field"; print accordingly. */
491 		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
493 		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
495 	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
498 static const struct file_operations fops = {
499 .owner = THIS_MODULE,
504 static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
505 struct dentry *root, struct mlx5_rsc_debug **dbg,
506 int rsn, char **field, int nfile, void *data)
508 struct mlx5_rsc_debug *d;
512 d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
519 sprintf(resn, "0x%x", rsn);
520 d->root = debugfs_create_dir(resn, root);
522 for (i = 0; i < nfile; i++) {
524 debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
532 static void rem_res_tree(struct mlx5_rsc_debug *d)
534 debugfs_remove_recursive(d->root);
538 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
542 if (!mlx5_debugfs_root)
545 err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
546 &qp->dbg, qp->qpn, qp_fields,
547 ARRAY_SIZE(qp_fields), qp);
553 EXPORT_SYMBOL(mlx5_debug_qp_add);
555 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
557 if (!mlx5_debugfs_root || !qp->dbg)
560 rem_res_tree(qp->dbg);
563 EXPORT_SYMBOL(mlx5_debug_qp_remove);
565 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
569 if (!mlx5_debugfs_root)
572 err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
573 &eq->dbg, eq->eqn, eq_fields,
574 ARRAY_SIZE(eq_fields), eq);
581 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
583 if (!mlx5_debugfs_root)
587 rem_res_tree(eq->dbg);
590 int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
594 if (!mlx5_debugfs_root)
597 err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
598 &cq->dbg, cq->cqn, cq_fields,
599 ARRAY_SIZE(cq_fields), cq);
606 void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
608 if (!mlx5_debugfs_root)
612 rem_res_tree(cq->dbg);