// SPDX-License-Identifier: GPL-2.0
/*
 * Debugfs interface
 *
 * Copyright (C) 2020, Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/pm_runtime.h>

#include "tb.h"
#define PORT_CAP_PCIE_LEN	1
#define PORT_CAP_POWER_LEN	2
#define PORT_CAP_LANE_LEN	3
#define PORT_CAP_USB3_LEN	5
#define PORT_CAP_DP_LEN		8
#define PORT_CAP_TMU_LEN	8
#define PORT_CAP_BASIC_LEN	9
#define PORT_CAP_USB4_LEN	20

#define SWITCH_CAP_TMU_LEN	26
#define SWITCH_CAP_BASIC_LEN	27

#define PATH_LEN		2

#define COUNTER_SET_LEN		3
#define DEBUGFS_ATTR(__space, __write)					\
static int __space ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __space ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __space ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __space ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.write = __write,						\
	.llseek = seq_lseek,						\
}

#define DEBUGFS_ATTR_RO(__space)					\
	DEBUGFS_ATTR(__space, NULL)

#define DEBUGFS_ATTR_RW(__space)					\
	DEBUGFS_ATTR(__space, __space ## _write)
static struct dentry *tb_debugfs_root;
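
/*
 * Validates the user supplied buffer and copies at most one page of it
 * into a zeroed kernel page. On success *count is updated to the number
 * of bytes actually copied and the returned buffer must be released with
 * free_page(). Returns an ERR_PTR() on failure.
 */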
static void *validate_and_copy_from_user(const void __user *user_buf,
					 size_t *count)
{
	size_t nbytes;
	void *buf;

	if (!*count)
		return ERR_PTR(-EINVAL);

	if (!access_ok(user_buf, *count))
		return ERR_PTR(-EFAULT);

	buf = (void *)get_zeroed_page(GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	nbytes = min_t(size_t, *count, PAGE_SIZE);
	if (copy_from_user(buf, user_buf, nbytes)) {
		free_page((unsigned long)buf);
		return ERR_PTR(-EFAULT);
	}

	*count = nbytes;
	return buf;
}
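
/*
 * Parses one newline-terminated token from @line. Both the short format
 * ("offset value") and the long format produced by the read side are
 * accepted; @short_fmt_len and @long_fmt_len give the number of fields
 * in each format. Returns true and fills @offs/@val when a line was
 * successfully parsed.
 */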
static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
		       int long_fmt_len)
{
	char *token;
	u32 v[5];
	int ret;

	token = strsep(line, "\n");
	if (!token)
		return false;

	/*
	 * For Adapter/Router configuration space:
	 * Short format is: offset value\n
	 *		    v[0]   v[1]
	 * Long format as produced from the read side:
	 * offset relative_offset cap_id vs_cap_id value\n
	 * v[0]   v[1]            v[2]   v[3]      v[4]
	 *
	 * For Counter configuration space:
	 * Short format is: offset\n
	 *		    v[0]
	 * Long format as produced from the read side:
	 * offset relative_offset counter_id value\n
	 * v[0]   v[1]            v[2]       v[3]
	 */
	ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
	/* In case of Counters a write clears the counter so "val" content is NA */
	if (ret == short_fmt_len) {
		*offs = v[0];
		*val = v[short_fmt_len - 1];
		return true;
	} else if (ret == long_fmt_len) {
		*offs = v[0];
		*val = v[long_fmt_len - 1];
		return true;
	}

	return false;
}
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
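/*
 * Common write handler for the router and adapter "regs" debugfs files.
 * Each parsed line results in a single dword write to the router (when
 * @port is NULL) or adapter configuration space. Writing registers
 * behind the driver's back taints the kernel with TAINT_USER.
 */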
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
			  const char __user *user_buf, size_t count,
			  loff_t *ppos)
{
	struct tb *tb = sw->tb;
	char *line, *buf;
	u32 val, offset;
	int ret = 0;

	buf = validate_and_copy_from_user(user_buf, &count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* User did hardware changes behind the driver's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	line = buf;
	while (parse_line(&line, &offset, &val, 2, 5)) {
		if (port)
			ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
		else
			ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			break;
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	free_page((unsigned long)buf);

	return ret < 0 ? ret : count;
}
static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_port *port = s->private;

	return regs_write(port->sw, port, user_buf, count, ppos);
}
static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_switch *sw = s->private;

	return regs_write(sw, NULL, user_buf, count, ppos);
}
#define DEBUGFS_MODE		0600
#else
#define port_regs_write		NULL
#define switch_regs_write	NULL
#define DEBUGFS_MODE		0400
#endif
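
/*
 * Clears all counter sets of the adapter by writing zeroed counter sets
 * over the whole counters configuration space.
 */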
static int port_clear_all_counters(struct tb_port *port)
{
	u32 *buf;
	int ret;

	buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
			    COUNTER_SET_LEN * port->config.max_counters);
	kfree(buf);

	return ret;
}
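
/*
 * Write handler for the adapter "counters" debugfs file. Writing just a
 * newline clears all counters in one go, otherwise each parsed line
 * clears the counter at the given offset.
 */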
static ssize_t counters_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = port->sw->tb;
	char *buf;
	int ret;

	buf = validate_and_copy_from_user(user_buf, &count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* If only the delimiter was written, clear all counters in one shot */
	if (buf[0] == '\n') {
		ret = port_clear_all_counters(port);
	} else {
		char *line = buf;
		u32 val, offset;

		ret = -EINVAL;
		while (parse_line(&line, &offset, &val, 1, 4)) {
			ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
					    offset, 1);
			if (ret)
				break;
		}
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	free_page((unsigned long)buf);

	return ret < 0 ? ret : count;
}
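
/*
 * Dumps @length dwords of the given capability in the long register
 * format, reading at most TB_MAX_CONFIG_RW_LENGTH dwords per
 * transaction. Exactly one of @sw and @port is expected to be non-NULL.
 */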
static void cap_show(struct seq_file *s, struct tb_switch *sw,
		     struct tb_port *port, unsigned int cap, u8 cap_id,
		     u8 vsec_id, int length)
{
	int ret, offset = 0;

	while (length > 0) {
		int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
		u32 data[TB_MAX_CONFIG_RW_LENGTH];

		if (port)
			ret = tb_port_read(port, data, TB_CFG_PORT,
					   cap + offset, dwords);
		else
			ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
		if (ret) {
			seq_printf(s, "0x%04x <not accessible>\n",
				   cap + offset);
			if (dwords > 1)
				seq_printf(s, "0x%04x ...\n", cap + offset + 1);
			return;
		}

		for (i = 0; i < dwords; i++) {
			seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
				   cap + offset + i, offset + i,
				   cap_id, vsec_id, data[i]);
		}

		length -= dwords;
		offset += dwords;
	}
}
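
/*
 * Figures out the length of the adapter capability at @cap from its
 * header and dumps its registers. Unsupported capabilities are reported
 * but otherwise skipped.
 */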
static void port_cap_show(struct tb_port *port, struct seq_file *s,
			  unsigned int cap)
{
	struct tb_cap_any header;
	u8 vsec_id = 0;
	int ret, length;

	ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
	if (ret) {
		seq_printf(s, "0x%04x <capability read failed>\n", cap);
		return;
	}

	switch (header.basic.cap) {
	case TB_PORT_CAP_PHY:
		length = PORT_CAP_LANE_LEN;
		break;

	case TB_PORT_CAP_TIME1:
		length = PORT_CAP_TMU_LEN;
		break;

	case TB_PORT_CAP_POWER:
		length = PORT_CAP_POWER_LEN;
		break;

	case TB_PORT_CAP_ADAP:
		if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
			length = PORT_CAP_PCIE_LEN;
		} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
			length = PORT_CAP_DP_LEN;
		} else if (tb_port_is_usb3_down(port) ||
			   tb_port_is_usb3_up(port)) {
			length = PORT_CAP_USB3_LEN;
		} else {
			seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
				   cap, header.basic.cap);
			return;
		}
		break;

	case TB_PORT_CAP_VSE:
		if (!header.extended_short.length) {
			ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
					   cap + 1, 1);
			if (ret) {
				seq_printf(s, "0x%04x <capability read failed>\n",
					   cap + 1);
				return;
			}
			length = header.extended_long.length;
			vsec_id = header.extended_short.vsec_id;
		} else {
			length = header.extended_short.length;
			vsec_id = header.extended_short.vsec_id;
			/*
			 * Ice Lake and Tiger Lake do not implement the
			 * full length of the capability, only the first 32
			 * dwords, so hard-code it here.
			 */
			if (!vsec_id &&
			    (tb_switch_is_ice_lake(port->sw) ||
			     tb_switch_is_tiger_lake(port->sw)))
				length = 32;
		}
		break;

	case TB_PORT_CAP_USB4:
		length = PORT_CAP_USB4_LEN;
		break;

	default:
		seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
			   cap, header.basic.cap);
		return;
	}

	cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
}
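
/* Walks the adapter capability list and dumps each capability found */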
static void port_caps_show(struct tb_port *port, struct seq_file *s)
{
	int cap;

	cap = tb_port_next_cap(port, 0);
	while (cap > 0) {
		port_cap_show(port, s, cap);
		cap = tb_port_next_cap(port, cap);
	}
}
static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
{
	u32 data[PORT_CAP_BASIC_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(data); i++)
		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);

	return 0;
}
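
/*
 * Read side of the adapter "regs" debugfs file. Dumps the basic adapter
 * configuration space followed by all capabilities, one dword per line
 * in "offset relative_offset cap_id vs_cap_id value" format.
 */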
static int port_regs_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");

	ret = port_basic_regs_show(port, s);
	if (ret)
		goto out_unlock;

	port_caps_show(port, s);

out_unlock:
	mutex_unlock(&tb->lock);
out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(port_regs);
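
/*
 * Figures out the length of the router capability at @cap from its
 * header and dumps its registers. Only TMU and vendor specific
 * capabilities are handled here; anything else is reported as unknown.
 */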
static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
			    unsigned int cap)
{
	struct tb_cap_any header;
	int ret, length;
	u8 vsec_id = 0;

	ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
	if (ret) {
		seq_printf(s, "0x%04x <capability read failed>\n", cap);
		return;
	}

	if (header.basic.cap == TB_SWITCH_CAP_VSE) {
		if (!header.extended_short.length) {
			ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
					 cap + 1, 1);
			if (ret) {
				seq_printf(s, "0x%04x <capability read failed>\n",
					   cap + 1);
				return;
			}
			length = header.extended_long.length;
		} else {
			length = header.extended_short.length;
		}
		vsec_id = header.extended_short.vsec_id;
	} else {
		if (header.basic.cap == TB_SWITCH_CAP_TMU) {
			length = SWITCH_CAP_TMU_LEN;
		} else {
			seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
				   cap, header.basic.cap);
			return;
		}
	}

	cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
}
static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
{
	int cap;

	cap = tb_switch_next_cap(sw, 0);
	while (cap > 0) {
		switch_cap_show(sw, s, cap);
		cap = tb_switch_next_cap(sw, cap);
	}
}
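
/* Dumps the basic router configuration space registers */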
static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
{
	u32 data[SWITCH_CAP_BASIC_LEN];
	size_t dwords;
	int ret, i;

	/* Only USB4 has the additional registers */
	if (tb_switch_is_usb4(sw))
		dwords = ARRAY_SIZE(data);
	else
		dwords = 7;

	ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);

	return 0;
}
static int switch_regs_show(struct seq_file *s, void *not_used)
{
	struct tb_switch *sw = s->private;
	struct tb *tb = sw->tb;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");

	ret = switch_basic_regs_show(sw, s);
	if (ret)
		goto out_unlock;

	switch_caps_show(sw, s);

out_unlock:
	mutex_unlock(&tb->lock);
out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(switch_regs);
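
/* Dumps one path (hop) config space entry of the adapter */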
static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
{
	u32 data[PATH_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
			   ARRAY_SIZE(data));
	if (ret) {
		seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(data); i++) {
		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
			   hopid * PATH_LEN + i, i, hopid, data[i]);
	}

	return 0;
}
static int path_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int start, i, ret = 0;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	seq_puts(s, "# offset relative_offset in_hop_id value\n");

	/* NHI and lane adapters have an entry for path 0 */
	if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
		ret = path_show_one(port, s, 0);
		if (ret)
			goto out_unlock;
	}

	start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;

	for (i = start; i <= port->config.max_in_hop_id; i++) {
		ret = path_show_one(port, s, i);
		if (ret)
			break;
	}

out_unlock:
	mutex_unlock(&tb->lock);
out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RO(path);
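
/* Dumps one counter set of the adapter */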
static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
				 int counter)
{
	u32 data[COUNTER_SET_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_COUNTERS,
			   counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
	if (ret) {
		seq_printf(s, "0x%04x <not accessible>\n",
			   counter * COUNTER_SET_LEN);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(data); i++) {
		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
			   counter * COUNTER_SET_LEN + i, i, counter, data[i]);
	}

	return 0;
}
static int counters_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int i, ret = 0;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	seq_puts(s, "# offset relative_offset counter_id value\n");

	for (i = 0; i < port->config.max_counters; i++) {
		ret = counter_set_regs_show(port, s, i);
		if (ret)
			break;
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(counters);
/**
 * tb_switch_debugfs_init() - Add debugfs entries for router
 * @sw: Pointer to the router
 *
 * Adds debugfs directories and files for given router.
 */
void tb_switch_debugfs_init(struct tb_switch *sw)
{
	struct dentry *debugfs_dir;
	struct tb_port *port;

	debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
	sw->debugfs_dir = debugfs_dir;
	debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
			    &switch_regs_fops);

	tb_switch_for_each_port(sw, port) {
		struct dentry *debugfs_dir;
		char dir_name[10];

		if (port->disabled)
			continue;
		if (port->config.type == TB_TYPE_INACTIVE)
			continue;

		snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
		debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
		debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
				    port, &port_regs_fops);
		debugfs_create_file("path", 0400, debugfs_dir, port,
				    &path_fops);
		if (port->config.counters_support)
			debugfs_create_file("counters", 0600, debugfs_dir, port,
					    &counters_fops);
	}
}
/**
 * tb_switch_debugfs_remove() - Remove all router debugfs entries
 * @sw: Pointer to the router
 *
 * Removes all previously added debugfs entries under this router.
 */
void tb_switch_debugfs_remove(struct tb_switch *sw)
{
	debugfs_remove_recursive(sw->debugfs_dir);
}
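
/* Creates the top-level "thunderbolt" directory under the debugfs root */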
void tb_debugfs_init(void)
{
	tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
}
void tb_debugfs_exit(void)
{
	debugfs_remove_recursive(tb_debugfs_root);
}