1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
16 #include "qed_reg_addr.h"
18 /* Memory groups enum */
35 MEM_GROUP_CONN_CFC_MEM,
36 MEM_GROUP_TASK_CFC_MEM,
51 /* Memory group names */
52 static const char * const s_mem_group_names[] = {
83 /* Idle check conditions */
85 static u32 cond5(const u32 *r, const u32 *imm)
87 return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 static u32 cond7(const u32 *r, const u32 *imm)
92 return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 static u32 cond14(const u32 *r, const u32 *imm)
97 return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
100 static u32 cond6(const u32 *r, const u32 *imm)
102 return (r[0] & imm[0]) != imm[1];
105 static u32 cond9(const u32 *r, const u32 *imm)
107 return ((r[0] & imm[0]) >> imm[1]) !=
108 (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
111 static u32 cond10(const u32 *r, const u32 *imm)
113 return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
116 static u32 cond4(const u32 *r, const u32 *imm)
118 return (r[0] & ~imm[0]) != imm[1];
121 static u32 cond0(const u32 *r, const u32 *imm)
123 return (r[0] & ~r[1]) != imm[0];
126 static u32 cond1(const u32 *r, const u32 *imm)
128 return r[0] != imm[0];
131 static u32 cond11(const u32 *r, const u32 *imm)
133 return r[0] != r[1] && r[2] == imm[0];
136 static u32 cond12(const u32 *r, const u32 *imm)
138 return r[0] != r[1] && r[2] > imm[0];
141 static u32 cond3(const u32 *r, const u32 *imm)
146 static u32 cond13(const u32 *r, const u32 *imm)
148 return r[0] & imm[0];
151 static u32 cond8(const u32 *r, const u32 *imm)
153 return r[0] < (r[1] - imm[0]);
156 static u32 cond2(const u32 *r, const u32 *imm)
158 return r[0] > imm[0];
161 /* Array of Idle Check conditions */
162 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
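/* Note (illustrative, not driver code): the idle-check rules in the binary
 * debug data presumably reference these conditions by index, so evaluating a
 * rule amounts to something like:
 *
 *	u32 regs[4], imms[4];	/* hypothetical operand values of one rule */
 *
 *	if (cond_arr[cond_id](regs, imms))
 *		report_rule_failure();	/* hypothetical helper */
 *
 * i.e. a non-zero return from the selected condition flags a violated
 * invariant.
 */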
180 /******************************* Data Types **********************************/
190 struct chip_platform_defs {
196 /* Chip constant definitions */
199 struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
202 /* Platform constant definitions */
203 struct platform_defs {
208 /* Storm constant definitions.
209 * Addresses are in bytes, sizes are in quad-regs.
213 enum block_id block_id;
214 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
216 u32 sem_fast_mem_addr;
217 u32 sem_frame_mode_addr;
218 u32 sem_slow_enable_addr;
219 u32 sem_slow_mode_addr;
220 u32 sem_slow_mode1_conf_addr;
221 u32 sem_sync_dbg_empty_addr;
222 u32 sem_slow_dbg_empty_addr;
224 u32 cm_conn_ag_ctx_lid_size;
225 u32 cm_conn_ag_ctx_rd_addr;
226 u32 cm_conn_st_ctx_lid_size;
227 u32 cm_conn_st_ctx_rd_addr;
228 u32 cm_task_ag_ctx_lid_size;
229 u32 cm_task_ag_ctx_rd_addr;
230 u32 cm_task_st_ctx_lid_size;
231 u32 cm_task_st_ctx_rd_addr;
234 /* Block constant definitions */
237 bool has_dbg_bus[MAX_CHIP_IDS];
238 bool associated_to_storm;
240 /* Valid only if associated_to_storm is true */
242 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
246 u32 dbg_force_valid_addr;
247 u32 dbg_force_frame_addr;
250 /* If true, block is taken out of reset before dump */
252 enum dbg_reset_regs reset_reg;
254 /* Bit offset in reset register */
258 /* Reset register definitions */
259 struct reset_reg_defs {
262 bool exists[MAX_CHIP_IDS];
265 struct grc_param_defs {
266 u32 default_val[MAX_CHIP_IDS];
270 u32 exclude_all_preset_val;
271 u32 crash_preset_val;
274 /* Address is in 128b units. Width is in bits. */
275 struct rss_mem_defs {
276 const char *mem_name;
277 const char *type_name;
279 u32 num_entries[MAX_CHIP_IDS];
280 u32 entry_width[MAX_CHIP_IDS];
283 struct vfc_ram_defs {
284 const char *mem_name;
285 const char *type_name;
290 struct big_ram_defs {
291 const char *instance_name;
292 enum mem_groups mem_group_id;
293 enum mem_groups ram_mem_group_id;
294 enum dbg_grc_params grc_param;
297 u32 num_of_blocks[MAX_CHIP_IDS];
301 const char *phy_name;
303 /* PHY base GRC address */
306 /* Relative address of indirect TBUS address register (bits 0..7) */
307 u32 tbus_addr_lo_addr;
309 /* Relative address of indirect TBUS address register (bits 8..10) */
310 u32 tbus_addr_hi_addr;
312 /* Relative address of indirect TBUS data register (bits 0..7) */
313 u32 tbus_data_lo_addr;
315 /* Relative address of indirect TBUS data register (bits 8..11) */
316 u32 tbus_data_hi_addr;
319 /******************************** Constants **********************************/
321 #define MAX_LCIDS 320
322 #define MAX_LTIDS 320
324 #define NUM_IOR_SETS 2
325 #define IORS_PER_SET 176
326 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
328 #define BYTES_IN_DWORD sizeof(u32)
330 /* In the macros below, size and offset are specified in bits */
331 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
332 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
333 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
334 #define FIELD_DWORD_OFFSET(type, field) \
335 (int)(FIELD_BIT_OFFSET(type, field) / 32)
336 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
337 #define FIELD_BIT_MASK(type, field) \
338 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
339 FIELD_DWORD_SHIFT(type, field))
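/* Worked example (for illustration only), using the VFC CAM command "row"
 * field defined further down (VFC_CAM_CMD_ROW_OFFSET = 48,
 * VFC_CAM_CMD_ROW_SIZE = 9):
 *
 *	FIELD_DWORD_OFFSET(VFC_CAM_CMD, ROW) = 48 / 32 = 1
 *	FIELD_DWORD_SHIFT(VFC_CAM_CMD, ROW)  = 48 % 32 = 16
 *	FIELD_BIT_MASK(VFC_CAM_CMD, ROW)     = ((1 << 9) - 1) << 16 = 0x01ff0000
 *
 * so the SET_VAR_FIELD() macro defined below, applied as
 * SET_VAR_FIELD(cmd, VFC_CAM_CMD, ROW, row), would clear bits 16..24 of
 * cmd[1] and OR in (row) << 16.
 */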
341 #define SET_VAR_FIELD(var, type, field, val) \
343 var[FIELD_DWORD_OFFSET(type, field)] &= \
344 (~FIELD_BIT_MASK(type, field)); \
345 var[FIELD_DWORD_OFFSET(type, field)] |= \
346 (val) << FIELD_DWORD_SHIFT(type, field); \
349 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
351 for (i = 0; i < (arr_size); i++) \
352 qed_wr(dev, ptt, addr, (arr)[i]); \
355 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
357 for (i = 0; i < (arr_size); i++) \
358 (arr)[i] = qed_rd(dev, ptt, addr); \
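/* Note that both helpers above access the same GRC address on every
 * iteration (and rely on a loop index 'i' declared by the caller); they are
 * meant for wide/indirect register interfaces that are fed or drained one
 * dword at a time, such as the VFC command/response registers handled later
 * in this file, not for copying a range of addresses.
 *
 * Illustrative sketch only - 'cmd_addr' and 'cmd_buf' are hypothetical:
 *
 *	u32 cmd_buf[VFC_CAM_CMD_DWORDS];
 *
 *	ARR_REG_WR(p_hwfn, p_ptt, cmd_addr, cmd_buf, VFC_CAM_CMD_DWORDS);
 */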
361 #ifndef DWORDS_TO_BYTES
362 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
364 #ifndef BYTES_TO_DWORDS
365 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
368 /* Extra lines include a signature line plus an optional latency events line */
369 #ifndef NUM_DBG_LINES
370 #define NUM_EXTRA_DBG_LINES(block_desc) \
371 (1 + ((block_desc)->has_latency_events ? 1 : 0))
372 #define NUM_DBG_LINES(block_desc) \
373 ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
376 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
377 #define RAM_LINES_TO_BYTES(lines) \
378 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
380 #define REG_DUMP_LEN_SHIFT 24
381 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
382 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
384 #define IDLE_CHK_RULE_SIZE_DWORDS \
385 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
387 #define IDLE_CHK_RESULT_HDR_DWORDS \
388 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
390 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
391 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
393 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
395 /* The sizes and offsets below are specified in bits */
396 #define VFC_CAM_CMD_STRUCT_SIZE 64
397 #define VFC_CAM_CMD_ROW_OFFSET 48
398 #define VFC_CAM_CMD_ROW_SIZE 9
399 #define VFC_CAM_ADDR_STRUCT_SIZE 16
400 #define VFC_CAM_ADDR_OP_OFFSET 0
401 #define VFC_CAM_ADDR_OP_SIZE 4
402 #define VFC_CAM_RESP_STRUCT_SIZE 256
403 #define VFC_RAM_ADDR_STRUCT_SIZE 16
404 #define VFC_RAM_ADDR_OP_OFFSET 0
405 #define VFC_RAM_ADDR_OP_SIZE 2
406 #define VFC_RAM_ADDR_ROW_OFFSET 2
407 #define VFC_RAM_ADDR_ROW_SIZE 10
408 #define VFC_RAM_RESP_STRUCT_SIZE 256
410 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
411 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
412 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
413 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
414 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
415 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
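/* With the struct sizes above, this works out to CEIL_DWORDS(64) = 2 command
 * dwords, CEIL_DWORDS(16) = 1 address dword and CEIL_DWORDS(256) = 8
 * response dwords for both the CAM and RAM interfaces.
 */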
417 #define NUM_VFC_RAM_TYPES 4
419 #define VFC_CAM_NUM_ROWS 512
421 #define VFC_OPCODE_CAM_RD 14
422 #define VFC_OPCODE_RAM_RD 0
424 #define NUM_RSS_MEM_TYPES 5
426 #define NUM_BIG_RAM_TYPES 3
427 #define BIG_RAM_BLOCK_SIZE_BYTES 128
428 #define BIG_RAM_BLOCK_SIZE_DWORDS \
429 BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
431 #define NUM_PHY_TBUS_ADDRESSES 2048
432 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
434 #define RESET_REG_UNRESET_OFFSET 4
436 #define STALL_DELAY_MS 500
438 #define STATIC_DEBUG_LINE_DWORDS 9
440 #define NUM_COMMON_GLOBAL_PARAMS 8
442 #define FW_IMG_MAIN 1
444 #ifndef REG_FIFO_ELEMENT_DWORDS
445 #define REG_FIFO_ELEMENT_DWORDS 2
447 #define REG_FIFO_DEPTH_ELEMENTS 32
448 #define REG_FIFO_DEPTH_DWORDS \
449 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
451 #ifndef IGU_FIFO_ELEMENT_DWORDS
452 #define IGU_FIFO_ELEMENT_DWORDS 4
454 #define IGU_FIFO_DEPTH_ELEMENTS 64
455 #define IGU_FIFO_DEPTH_DWORDS \
456 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
458 #ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
459 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
461 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
462 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
463 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
464 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
466 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
468 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
470 #define EMPTY_FW_VERSION_STR "???_???_???_???"
471 #define EMPTY_FW_IMAGE_STR "???????????????"
473 /***************************** Constant Arrays *******************************/
481 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
483 /* Chip constant definitions array */
484 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
486 {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
491 {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
497 /* Storm constant definitions array */
498 static struct storm_defs s_storm_defs[] = {
501 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
502 TSEM_REG_FAST_MEMORY,
503 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
504 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
505 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
506 TCM_REG_CTX_RBC_ACCS,
507 4, TCM_REG_AGG_CON_CTX,
508 16, TCM_REG_SM_CON_CTX,
509 2, TCM_REG_AGG_TASK_CTX,
510 4, TCM_REG_SM_TASK_CTX},
514 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
515 MSEM_REG_FAST_MEMORY,
516 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
517 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
518 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
519 MCM_REG_CTX_RBC_ACCS,
520 1, MCM_REG_AGG_CON_CTX,
521 10, MCM_REG_SM_CON_CTX,
522 2, MCM_REG_AGG_TASK_CTX,
523 7, MCM_REG_SM_TASK_CTX},
527 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
528 USEM_REG_FAST_MEMORY,
529 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
530 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
531 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
532 UCM_REG_CTX_RBC_ACCS,
533 2, UCM_REG_AGG_CON_CTX,
534 13, UCM_REG_SM_CON_CTX,
535 3, UCM_REG_AGG_TASK_CTX,
536 3, UCM_REG_SM_TASK_CTX},
540 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
541 XSEM_REG_FAST_MEMORY,
542 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
543 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
544 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
545 XCM_REG_CTX_RBC_ACCS,
546 9, XCM_REG_AGG_CON_CTX,
547 15, XCM_REG_SM_CON_CTX,
553 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
554 YSEM_REG_FAST_MEMORY,
555 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
556 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
557 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
558 YCM_REG_CTX_RBC_ACCS,
559 2, YCM_REG_AGG_CON_CTX,
560 3, YCM_REG_SM_CON_CTX,
561 2, YCM_REG_AGG_TASK_CTX,
562 12, YCM_REG_SM_TASK_CTX},
566 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
567 PSEM_REG_FAST_MEMORY,
568 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
569 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
570 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
571 PCM_REG_CTX_RBC_ACCS,
573 10, PCM_REG_SM_CON_CTX,
578 /* Block definitions array */
580 static struct block_defs block_grc_defs = {
582 {true, true}, false, 0,
583 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
584 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
585 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
586 GRC_REG_DBG_FORCE_FRAME,
587 true, false, DBG_RESET_REG_MISC_PL_UA, 1
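/* Note on the block_defs initializers in this section: judging by struct
 * block_defs above and by how the fields are consumed later (e.g. in
 * qed_update_blocks_reset_state() and qed_grc_unreset_blocks()), the four
 * trailing values are has_reset_bit, unreset, reset_reg and the bit offset
 * within that reset register. For block_grc_defs above this means the GRC
 * block has a reset bit (bit 1 of the MISC_PL_UA reset register) but is not
 * taken out of reset before dumping.
 */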
590 static struct block_defs block_miscs_defs = {
591 "miscs", {false, false}, false, 0,
592 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
594 false, false, MAX_DBG_RESET_REGS, 0
597 static struct block_defs block_misc_defs = {
598 "misc", {false, false}, false, 0,
599 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
601 false, false, MAX_DBG_RESET_REGS, 0
604 static struct block_defs block_dbu_defs = {
605 "dbu", {false, false}, false, 0,
606 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
608 false, false, MAX_DBG_RESET_REGS, 0
611 static struct block_defs block_pglue_b_defs = {
613 {true, true}, false, 0,
614 {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
615 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
616 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
617 PGLUE_B_REG_DBG_FORCE_FRAME,
618 true, false, DBG_RESET_REG_MISCS_PL_HV, 1
621 static struct block_defs block_cnig_defs = {
623 {false, true}, false, 0,
624 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
625 CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
626 CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
627 CNIG_REG_DBG_FORCE_FRAME_K2,
628 true, false, DBG_RESET_REG_MISCS_PL_HV, 0
631 static struct block_defs block_cpmu_defs = {
632 "cpmu", {false, false}, false, 0,
633 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
635 true, false, DBG_RESET_REG_MISCS_PL_HV, 8
638 static struct block_defs block_ncsi_defs = {
640 {true, true}, false, 0,
641 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
642 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
643 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
644 NCSI_REG_DBG_FORCE_FRAME,
645 true, false, DBG_RESET_REG_MISCS_PL_HV, 5
648 static struct block_defs block_opte_defs = {
649 "opte", {false, false}, false, 0,
650 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
652 true, false, DBG_RESET_REG_MISCS_PL_HV, 4
655 static struct block_defs block_bmb_defs = {
657 {true, true}, false, 0,
658 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
659 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
660 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
661 BMB_REG_DBG_FORCE_FRAME,
662 true, false, DBG_RESET_REG_MISCS_PL_UA, 7
665 static struct block_defs block_pcie_defs = {
667 {false, true}, false, 0,
668 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
669 PCIE_REG_DBG_COMMON_SELECT_K2,
670 PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
671 PCIE_REG_DBG_COMMON_SHIFT_K2,
672 PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
673 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
674 false, false, MAX_DBG_RESET_REGS, 0
677 static struct block_defs block_mcp_defs = {
678 "mcp", {false, false}, false, 0,
679 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
681 false, false, MAX_DBG_RESET_REGS, 0
684 static struct block_defs block_mcp2_defs = {
686 {true, true}, false, 0,
687 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
688 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
689 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
690 MCP2_REG_DBG_FORCE_FRAME,
691 false, false, MAX_DBG_RESET_REGS, 0
694 static struct block_defs block_pswhst_defs = {
696 {true, true}, false, 0,
697 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
698 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
699 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
700 PSWHST_REG_DBG_FORCE_FRAME,
701 true, false, DBG_RESET_REG_MISC_PL_HV, 0
704 static struct block_defs block_pswhst2_defs = {
706 {true, true}, false, 0,
707 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
708 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
709 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
710 PSWHST2_REG_DBG_FORCE_FRAME,
711 true, false, DBG_RESET_REG_MISC_PL_HV, 0
714 static struct block_defs block_pswrd_defs = {
716 {true, true}, false, 0,
717 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
718 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
719 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
720 PSWRD_REG_DBG_FORCE_FRAME,
721 true, false, DBG_RESET_REG_MISC_PL_HV, 2
724 static struct block_defs block_pswrd2_defs = {
726 {true, true}, false, 0,
727 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
728 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
729 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
730 PSWRD2_REG_DBG_FORCE_FRAME,
731 true, false, DBG_RESET_REG_MISC_PL_HV, 2
734 static struct block_defs block_pswwr_defs = {
736 {true, true}, false, 0,
737 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
738 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
739 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
740 PSWWR_REG_DBG_FORCE_FRAME,
741 true, false, DBG_RESET_REG_MISC_PL_HV, 3
744 static struct block_defs block_pswwr2_defs = {
745 "pswwr2", {false, false}, false, 0,
746 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
748 true, false, DBG_RESET_REG_MISC_PL_HV, 3
751 static struct block_defs block_pswrq_defs = {
753 {true, true}, false, 0,
754 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
755 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
756 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
757 PSWRQ_REG_DBG_FORCE_FRAME,
758 true, false, DBG_RESET_REG_MISC_PL_HV, 1
761 static struct block_defs block_pswrq2_defs = {
763 {true, true}, false, 0,
764 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
765 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
766 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
767 PSWRQ2_REG_DBG_FORCE_FRAME,
768 true, false, DBG_RESET_REG_MISC_PL_HV, 1
771 static struct block_defs block_pglcs_defs = {
773 {false, true}, false, 0,
774 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
775 PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
776 PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
777 PGLCS_REG_DBG_FORCE_FRAME_K2,
778 true, false, DBG_RESET_REG_MISCS_PL_HV, 2
781 static struct block_defs block_ptu_defs = {
783 {true, true}, false, 0,
784 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
785 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
786 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
787 PTU_REG_DBG_FORCE_FRAME,
788 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
791 static struct block_defs block_dmae_defs = {
793 {true, true}, false, 0,
794 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
795 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
796 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
797 DMAE_REG_DBG_FORCE_FRAME,
798 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
801 static struct block_defs block_tcm_defs = {
803 {true, true}, true, DBG_TSTORM_ID,
804 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
805 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
806 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
807 TCM_REG_DBG_FORCE_FRAME,
808 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
811 static struct block_defs block_mcm_defs = {
813 {true, true}, true, DBG_MSTORM_ID,
814 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
815 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
816 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
817 MCM_REG_DBG_FORCE_FRAME,
818 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
821 static struct block_defs block_ucm_defs = {
823 {true, true}, true, DBG_USTORM_ID,
824 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
825 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
826 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
827 UCM_REG_DBG_FORCE_FRAME,
828 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
831 static struct block_defs block_xcm_defs = {
833 {true, true}, true, DBG_XSTORM_ID,
834 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
835 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
836 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
837 XCM_REG_DBG_FORCE_FRAME,
838 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
841 static struct block_defs block_ycm_defs = {
843 {true, true}, true, DBG_YSTORM_ID,
844 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
845 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
846 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
847 YCM_REG_DBG_FORCE_FRAME,
848 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
851 static struct block_defs block_pcm_defs = {
853 {true, true}, true, DBG_PSTORM_ID,
854 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
855 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
856 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
857 PCM_REG_DBG_FORCE_FRAME,
858 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
861 static struct block_defs block_qm_defs = {
863 {true, true}, false, 0,
864 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
865 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
866 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
867 QM_REG_DBG_FORCE_FRAME,
868 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
871 static struct block_defs block_tm_defs = {
873 {true, true}, false, 0,
874 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
875 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
876 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
877 TM_REG_DBG_FORCE_FRAME,
878 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
881 static struct block_defs block_dorq_defs = {
883 {true, true}, false, 0,
884 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
885 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
886 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
887 DORQ_REG_DBG_FORCE_FRAME,
888 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
891 static struct block_defs block_brb_defs = {
893 {true, true}, false, 0,
894 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
895 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
896 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
897 BRB_REG_DBG_FORCE_FRAME,
898 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
901 static struct block_defs block_src_defs = {
903 {true, true}, false, 0,
904 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
905 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
906 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
907 SRC_REG_DBG_FORCE_FRAME,
908 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
911 static struct block_defs block_prs_defs = {
913 {true, true}, false, 0,
914 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
915 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
916 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
917 PRS_REG_DBG_FORCE_FRAME,
918 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
921 static struct block_defs block_tsdm_defs = {
923 {true, true}, true, DBG_TSTORM_ID,
924 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
925 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
926 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
927 TSDM_REG_DBG_FORCE_FRAME,
928 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
931 static struct block_defs block_msdm_defs = {
933 {true, true}, true, DBG_MSTORM_ID,
934 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
935 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
936 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
937 MSDM_REG_DBG_FORCE_FRAME,
938 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
941 static struct block_defs block_usdm_defs = {
943 {true, true}, true, DBG_USTORM_ID,
944 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
945 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
946 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
947 USDM_REG_DBG_FORCE_FRAME,
948 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
951 static struct block_defs block_xsdm_defs = {
953 {true, true}, true, DBG_XSTORM_ID,
954 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
955 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
956 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
957 XSDM_REG_DBG_FORCE_FRAME,
958 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
961 static struct block_defs block_ysdm_defs = {
963 {true, true}, true, DBG_YSTORM_ID,
964 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
965 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
966 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
967 YSDM_REG_DBG_FORCE_FRAME,
968 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
971 static struct block_defs block_psdm_defs = {
973 {true, true}, true, DBG_PSTORM_ID,
974 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
975 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
976 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
977 PSDM_REG_DBG_FORCE_FRAME,
978 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
981 static struct block_defs block_tsem_defs = {
983 {true, true}, true, DBG_TSTORM_ID,
984 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
985 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
986 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
987 TSEM_REG_DBG_FORCE_FRAME,
988 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
991 static struct block_defs block_msem_defs = {
993 {true, true}, true, DBG_MSTORM_ID,
994 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
995 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
996 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
997 MSEM_REG_DBG_FORCE_FRAME,
998 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
1001 static struct block_defs block_usem_defs = {
1003 {true, true}, true, DBG_USTORM_ID,
1004 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1005 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1006 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1007 USEM_REG_DBG_FORCE_FRAME,
1008 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
1011 static struct block_defs block_xsem_defs = {
1013 {true, true}, true, DBG_XSTORM_ID,
1014 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1015 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1016 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1017 XSEM_REG_DBG_FORCE_FRAME,
1018 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1021 static struct block_defs block_ysem_defs = {
1023 {true, true}, true, DBG_YSTORM_ID,
1024 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
1025 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1026 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1027 YSEM_REG_DBG_FORCE_FRAME,
1028 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1031 static struct block_defs block_psem_defs = {
1033 {true, true}, true, DBG_PSTORM_ID,
1034 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1035 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1036 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1037 PSEM_REG_DBG_FORCE_FRAME,
1038 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1041 static struct block_defs block_rss_defs = {
1043 {true, true}, false, 0,
1044 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1045 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1046 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1047 RSS_REG_DBG_FORCE_FRAME,
1048 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1051 static struct block_defs block_tmld_defs = {
1053 {true, true}, false, 0,
1054 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1055 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1056 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1057 TMLD_REG_DBG_FORCE_FRAME,
1058 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1061 static struct block_defs block_muld_defs = {
1063 {true, true}, false, 0,
1064 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1065 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1066 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1067 MULD_REG_DBG_FORCE_FRAME,
1068 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1071 static struct block_defs block_yuld_defs = {
1073 {true, true}, false, 0,
1074 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1075 YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1076 YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1077 YULD_REG_DBG_FORCE_FRAME_BB_K2,
1078 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1082 static struct block_defs block_xyld_defs = {
1084 {true, true}, false, 0,
1085 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1086 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1087 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1088 XYLD_REG_DBG_FORCE_FRAME,
1089 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1092 static struct block_defs block_prm_defs = {
1094 {true, true}, false, 0,
1095 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1096 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1097 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1098 PRM_REG_DBG_FORCE_FRAME,
1099 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1102 static struct block_defs block_pbf_pb1_defs = {
1104 {true, true}, false, 0,
1105 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1106 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1107 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1108 PBF_PB1_REG_DBG_FORCE_FRAME,
1109 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1113 static struct block_defs block_pbf_pb2_defs = {
1115 {true, true}, false, 0,
1116 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1117 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1118 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1119 PBF_PB2_REG_DBG_FORCE_FRAME,
1120 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1124 static struct block_defs block_rpb_defs = {
1126 {true, true}, false, 0,
1127 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1128 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1129 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1130 RPB_REG_DBG_FORCE_FRAME,
1131 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1134 static struct block_defs block_btb_defs = {
1136 {true, true}, false, 0,
1137 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
1138 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1139 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1140 BTB_REG_DBG_FORCE_FRAME,
1141 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1144 static struct block_defs block_pbf_defs = {
1146 {true, true}, false, 0,
1147 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1148 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1149 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1150 PBF_REG_DBG_FORCE_FRAME,
1151 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1154 static struct block_defs block_rdif_defs = {
1156 {true, true}, false, 0,
1157 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1158 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1159 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1160 RDIF_REG_DBG_FORCE_FRAME,
1161 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1164 static struct block_defs block_tdif_defs = {
1166 {true, true}, false, 0,
1167 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1168 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1169 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1170 TDIF_REG_DBG_FORCE_FRAME,
1171 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1174 static struct block_defs block_cdu_defs = {
1176 {true, true}, false, 0,
1177 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1178 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1179 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1180 CDU_REG_DBG_FORCE_FRAME,
1181 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1184 static struct block_defs block_ccfc_defs = {
1186 {true, true}, false, 0,
1187 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1188 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1189 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1190 CCFC_REG_DBG_FORCE_FRAME,
1191 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1194 static struct block_defs block_tcfc_defs = {
1196 {true, true}, false, 0,
1197 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1198 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1199 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1200 TCFC_REG_DBG_FORCE_FRAME,
1201 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1204 static struct block_defs block_igu_defs = {
1206 {true, true}, false, 0,
1207 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1208 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1209 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1210 IGU_REG_DBG_FORCE_FRAME,
1211 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1214 static struct block_defs block_cau_defs = {
1216 {true, true}, false, 0,
1217 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1218 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1219 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1220 CAU_REG_DBG_FORCE_FRAME,
1221 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1224 static struct block_defs block_umac_defs = {
1226 {false, true}, false, 0,
1227 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1228 UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
1229 UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
1230 UMAC_REG_DBG_FORCE_FRAME_K2,
1231 true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1234 static struct block_defs block_xmac_defs = {
1235 "xmac", {false, false}, false, 0,
1236 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1238 false, false, MAX_DBG_RESET_REGS, 0
1241 static struct block_defs block_dbg_defs = {
1242 "dbg", {false, false}, false, 0,
1243 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1245 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1248 static struct block_defs block_nig_defs = {
1250 {true, true}, false, 0,
1251 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1252 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1253 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1254 NIG_REG_DBG_FORCE_FRAME,
1255 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1258 static struct block_defs block_wol_defs = {
1260 {false, true}, false, 0,
1261 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1262 WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
1263 WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
1264 WOL_REG_DBG_FORCE_FRAME_K2,
1265 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1268 static struct block_defs block_bmbn_defs = {
1270 {false, true}, false, 0,
1271 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
1272 BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
1273 BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
1274 BMBN_REG_DBG_FORCE_FRAME_K2,
1275 false, false, MAX_DBG_RESET_REGS, 0
1278 static struct block_defs block_ipc_defs = {
1279 "ipc", {false, false}, false, 0,
1280 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1282 true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1285 static struct block_defs block_nwm_defs = {
1287 {false, true}, false, 0,
1288 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1289 NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
1290 NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
1291 NWM_REG_DBG_FORCE_FRAME_K2,
1292 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1295 static struct block_defs block_nws_defs = {
1297 {false, true}, false, 0,
1298 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1299 NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
1300 NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
1301 NWS_REG_DBG_FORCE_FRAME_K2,
1302 true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1305 static struct block_defs block_ms_defs = {
1307 {false, true}, false, 0,
1308 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1309 MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
1310 MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
1311 MS_REG_DBG_FORCE_FRAME_K2,
1312 true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1315 static struct block_defs block_phy_pcie_defs = {
1317 {false, true}, false, 0,
1318 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1319 PCIE_REG_DBG_COMMON_SELECT_K2,
1320 PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
1321 PCIE_REG_DBG_COMMON_SHIFT_K2,
1322 PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
1323 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
1324 false, false, MAX_DBG_RESET_REGS, 0
1327 static struct block_defs block_led_defs = {
1328 "led", {false, false}, false, 0,
1329 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1331 true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1334 static struct block_defs block_avs_wrap_defs = {
1335 "avs_wrap", {false, false}, false, 0,
1336 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1338 true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1341 static struct block_defs block_rgfs_defs = {
1342 "rgfs", {false, false}, false, 0,
1343 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1345 false, false, MAX_DBG_RESET_REGS, 0
1348 static struct block_defs block_rgsrc_defs = {
1349 "rgsrc", {false, false}, false, 0,
1350 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1352 false, false, MAX_DBG_RESET_REGS, 0
1355 static struct block_defs block_tgfs_defs = {
1356 "tgfs", {false, false}, false, 0,
1357 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1359 false, false, MAX_DBG_RESET_REGS, 0
1362 static struct block_defs block_tgsrc_defs = {
1363 "tgsrc", {false, false}, false, 0,
1364 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1366 false, false, MAX_DBG_RESET_REGS, 0
1369 static struct block_defs block_ptld_defs = {
1370 "ptld", {false, false}, false, 0,
1371 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1373 false, false, MAX_DBG_RESET_REGS, 0
1376 static struct block_defs block_ypld_defs = {
1377 "ypld", {false, false}, false, 0,
1378 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1380 false, false, MAX_DBG_RESET_REGS, 0
1383 static struct block_defs block_misc_aeu_defs = {
1384 "misc_aeu", {false, false}, false, 0,
1385 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1387 false, false, MAX_DBG_RESET_REGS, 0
1390 static struct block_defs block_bar0_map_defs = {
1391 "bar0_map", {false, false}, false, 0,
1392 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1394 false, false, MAX_DBG_RESET_REGS, 0
1397 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1402 &block_pglue_b_defs,
1412 &block_pswhst2_defs,
1454 &block_pbf_pb1_defs,
1455 &block_pbf_pb2_defs,
1480 &block_phy_pcie_defs,
1482 &block_avs_wrap_defs,
1483 &block_misc_aeu_defs,
1484 &block_bar0_map_defs,
1487 static struct platform_defs s_platform_defs[] = {
1494 static struct grc_param_defs s_grc_param_defs[] = {
1495 /* DBG_GRC_PARAM_DUMP_TSTORM */
1496 {{1, 1}, 0, 1, false, 1, 1},
1498 /* DBG_GRC_PARAM_DUMP_MSTORM */
1499 {{1, 1}, 0, 1, false, 1, 1},
1501 /* DBG_GRC_PARAM_DUMP_USTORM */
1502 {{1, 1}, 0, 1, false, 1, 1},
1504 /* DBG_GRC_PARAM_DUMP_XSTORM */
1505 {{1, 1}, 0, 1, false, 1, 1},
1507 /* DBG_GRC_PARAM_DUMP_YSTORM */
1508 {{1, 1}, 0, 1, false, 1, 1},
1510 /* DBG_GRC_PARAM_DUMP_PSTORM */
1511 {{1, 1}, 0, 1, false, 1, 1},
1513 /* DBG_GRC_PARAM_DUMP_REGS */
1514 {{1, 1}, 0, 1, false, 0, 1},
1516 /* DBG_GRC_PARAM_DUMP_RAM */
1517 {{1, 1}, 0, 1, false, 0, 1},
1519 /* DBG_GRC_PARAM_DUMP_PBUF */
1520 {{1, 1}, 0, 1, false, 0, 1},
1522 /* DBG_GRC_PARAM_DUMP_IOR */
1523 {{0, 0}, 0, 1, false, 0, 1},
1525 /* DBG_GRC_PARAM_DUMP_VFC */
1526 {{0, 0}, 0, 1, false, 0, 1},
1528 /* DBG_GRC_PARAM_DUMP_CM_CTX */
1529 {{1, 1}, 0, 1, false, 0, 1},
1531 /* DBG_GRC_PARAM_DUMP_ILT */
1532 {{1, 1}, 0, 1, false, 0, 1},
1534 /* DBG_GRC_PARAM_DUMP_RSS */
1535 {{1, 1}, 0, 1, false, 0, 1},
1537 /* DBG_GRC_PARAM_DUMP_CAU */
1538 {{1, 1}, 0, 1, false, 0, 1},
1540 /* DBG_GRC_PARAM_DUMP_QM */
1541 {{1, 1}, 0, 1, false, 0, 1},
1543 /* DBG_GRC_PARAM_DUMP_MCP */
1544 {{1, 1}, 0, 1, false, 0, 1},
1546 /* DBG_GRC_PARAM_RESERVED */
1547 {{1, 1}, 0, 1, false, 0, 1},
1549 /* DBG_GRC_PARAM_DUMP_CFC */
1550 {{1, 1}, 0, 1, false, 0, 1},
1552 /* DBG_GRC_PARAM_DUMP_IGU */
1553 {{1, 1}, 0, 1, false, 0, 1},
1555 /* DBG_GRC_PARAM_DUMP_BRB */
1556 {{0, 0}, 0, 1, false, 0, 1},
1558 /* DBG_GRC_PARAM_DUMP_BTB */
1559 {{0, 0}, 0, 1, false, 0, 1},
1561 /* DBG_GRC_PARAM_DUMP_BMB */
1562 {{0, 0}, 0, 1, false, 0, 1},
1564 /* DBG_GRC_PARAM_DUMP_NIG */
1565 {{1, 1}, 0, 1, false, 0, 1},
1567 /* DBG_GRC_PARAM_DUMP_MULD */
1568 {{1, 1}, 0, 1, false, 0, 1},
1570 /* DBG_GRC_PARAM_DUMP_PRS */
1571 {{1, 1}, 0, 1, false, 0, 1},
1573 /* DBG_GRC_PARAM_DUMP_DMAE */
1574 {{1, 1}, 0, 1, false, 0, 1},
1576 /* DBG_GRC_PARAM_DUMP_TM */
1577 {{1, 1}, 0, 1, false, 0, 1},
1579 /* DBG_GRC_PARAM_DUMP_SDM */
1580 {{1, 1}, 0, 1, false, 0, 1},
1582 /* DBG_GRC_PARAM_DUMP_DIF */
1583 {{1, 1}, 0, 1, false, 0, 1},
1585 /* DBG_GRC_PARAM_DUMP_STATIC */
1586 {{1, 1}, 0, 1, false, 0, 1},
1588 /* DBG_GRC_PARAM_UNSTALL */
1589 {{0, 0}, 0, 1, false, 0, 0},
1591 /* DBG_GRC_PARAM_NUM_LCIDS */
1592 {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
1595 /* DBG_GRC_PARAM_NUM_LTIDS */
1596 {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
1599 /* DBG_GRC_PARAM_EXCLUDE_ALL */
1600 {{0, 0}, 0, 1, true, 0, 0},
1602 /* DBG_GRC_PARAM_CRASH */
1603 {{0, 0}, 0, 1, true, 0, 0},
1605 /* DBG_GRC_PARAM_PARITY_SAFE */
1606 {{0, 0}, 0, 1, false, 1, 0},
1608 /* DBG_GRC_PARAM_DUMP_CM */
1609 {{1, 1}, 0, 1, false, 0, 1},
1611 /* DBG_GRC_PARAM_DUMP_PHY */
1612 {{1, 1}, 0, 1, false, 0, 1},
1614 /* DBG_GRC_PARAM_NO_MCP */
1615 {{0, 0}, 0, 1, false, 0, 0},
1617 /* DBG_GRC_PARAM_NO_FW_VER */
1618 {{0, 0}, 0, 1, false, 0, 0}
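/* Reading the initializers above against struct grc_param_defs: each entry
 * apparently starts with the per-chip default values, then the fields not
 * shown here (presumably the allowed range and a "this parameter is itself a
 * preset" flag), and ends with the values forced by the EXCLUDE_ALL and
 * CRASH presets. E.g. DBG_GRC_PARAM_DUMP_REGS defaults to 1 on both chips,
 * is cleared by the EXCLUDE_ALL preset and set by the CRASH preset.
 */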
1621 static struct rss_mem_defs s_rss_mem_defs[] = {
1622 { "rss_mem_cid", "rss_cid", 0,
1626 { "rss_mem_key_msb", "rss_key", 1024,
1630 { "rss_mem_key_lsb", "rss_key", 2048,
1634 { "rss_mem_info", "rss_info", 3072,
1638 { "rss_mem_ind", "rss_ind", 4096,
1643 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1644 {"vfc_ram_tt1", "vfc_ram", 0, 512},
1645 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
1646 {"vfc_ram_stt2", "vfc_ram", 640, 32},
1647 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1650 static struct big_ram_defs s_big_ram_defs[] = {
1651 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1652 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1655 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1656 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1659 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1660 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1664 static struct reset_reg_defs s_reset_regs_defs[] = {
1665 /* DBG_RESET_REG_MISCS_PL_UA */
1666 { MISCS_REG_RESET_PL_UA, 0x0,
1669 /* DBG_RESET_REG_MISCS_PL_HV */
1670 { MISCS_REG_RESET_PL_HV, 0x0,
1673 /* DBG_RESET_REG_MISCS_PL_HV_2 */
1674 { MISCS_REG_RESET_PL_HV_2_K2, 0x0,
1677 /* DBG_RESET_REG_MISC_PL_UA */
1678 { MISC_REG_RESET_PL_UA, 0x0,
1681 /* DBG_RESET_REG_MISC_PL_HV */
1682 { MISC_REG_RESET_PL_HV, 0x0,
1685 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1686 { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
1689 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1690 { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
1693 /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1694 { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
1698 static struct phy_defs s_phy_defs[] = {
1699 {"nw_phy", NWS_REG_NWS_CMU_K2,
1700 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
1701 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
1702 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
1703 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
1704 {"sgmii_phy", MS_REG_MS_CMU_K2,
1705 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
1706 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
1707 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
1708 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
1709 {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
1710 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
1711 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
1712 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
1713 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
1714 {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
1715 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
1716 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
1717 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
1718 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
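/* Each phy_defs entry above pairs a PHY name and its base GRC address with
 * the four indirect TBUS registers described in struct phy_defs: address
 * bits 0..7 and 8..10, then data bits 0..7 and 8..11. The PHY dump code is
 * expected to write a TBUS address through the two address registers and
 * read the sampled data back through the two data registers, one address at
 * a time.
 */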
1721 /**************************** Private Functions ******************************/
1723 /* Reads and returns a single dword from the specified unaligned buffer */
1724 static u32 qed_read_unaligned_dword(u8 *buf)
1728 memcpy((u8 *)&dword, buf, sizeof(dword));
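	/* memcpy is used instead of a direct 32-bit load so that buffers with
	 * arbitrary byte alignment can be read safely on all architectures.
	 */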
1732 /* Returns the value of the specified GRC param */
1733 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1734 enum dbg_grc_params grc_param)
1736 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1738 return dev_data->grc.param_val[grc_param];
1741 /* Initializes the GRC parameters */
1742 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1744 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1746 if (!dev_data->grc.params_initialized) {
1747 qed_dbg_grc_set_params_default(p_hwfn);
1748 dev_data->grc.params_initialized = 1;
1752 /* Initializes debug data for the specified device */
1753 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1754 struct qed_ptt *p_ptt)
1756 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1758 if (dev_data->initialized)
1759 return DBG_STATUS_OK;
1761 if (QED_IS_K2(p_hwfn->cdev)) {
1762 dev_data->chip_id = CHIP_K2;
1763 dev_data->mode_enable[MODE_K2] = 1;
1764 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1765 dev_data->chip_id = CHIP_BB;
1766 dev_data->mode_enable[MODE_BB] = 1;
1768 return DBG_STATUS_UNKNOWN_CHIP;
1771 dev_data->platform_id = PLATFORM_ASIC;
1772 dev_data->mode_enable[MODE_ASIC] = 1;
1774 /* Initializes the GRC parameters */
1775 qed_dbg_grc_init_params(p_hwfn);
1777 dev_data->initialized = true;
1779 return DBG_STATUS_OK;
1782 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1783 enum block_id block_id)
1785 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1787 return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1792 /* Reads the FW info structure for the specified Storm from the chip,
1793 * and writes it to the specified fw_info pointer.
1795 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1796 struct qed_ptt *p_ptt,
1797 u8 storm_id, struct fw_info *fw_info)
1799 struct storm_defs *storm = &s_storm_defs[storm_id];
1800 struct fw_info_location fw_info_location;
1803 memset(&fw_info_location, 0, sizeof(fw_info_location));
1804 memset(fw_info, 0, sizeof(*fw_info));
1806 /* First read the address that points to the fw_info location.
1807 * The address is located in the last line of the Storm RAM.
1809 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1810 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
1811 sizeof(fw_info_location);
1812 dest = (u32 *)&fw_info_location;
1814 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1815 i++, addr += BYTES_IN_DWORD)
1816 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1818 /* Read FW version info from Storm RAM */
1819 if (fw_info_location.size > 0 && fw_info_location.size <=
1821 addr = fw_info_location.grc_addr;
1822 dest = (u32 *)fw_info;
1823 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1824 i++, addr += BYTES_IN_DWORD)
1825 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1829 /* Dumps the specified string to the specified buffer.
1830 * Returns the dumped size in bytes.
1832 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1835 strcpy(dump_buf, str);
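	/* The size returned below includes the terminating NUL written by
	 * strcpy(), so consecutive strings in the dump remain separable.
	 */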
1837 return (u32)strlen(str) + 1;
1840 /* Dumps zeros to align the specified buffer to dwords.
1841 * Returns the dumped size in bytes.
1843 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1845 u8 offset_in_dword, align_size;
1847 offset_in_dword = (u8)(byte_offset & 0x3);
1848 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1850 if (dump && align_size)
1851 memset(dump_buf, 0, align_size);
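	/* Example: a byte_offset of 5 gives offset_in_dword = 1, so 3 zero
	 * bytes are written and the caller ends up on a dword boundary.
	 */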
1856 /* Writes the specified string param to the specified buffer.
1857 * Returns the dumped size in dwords.
1859 static u32 qed_dump_str_param(u32 *dump_buf,
1861 const char *param_name, const char *param_val)
1863 char *char_buf = (char *)dump_buf;
1866 /* Dump param name */
1867 offset += qed_dump_str(char_buf + offset, dump, param_name);
1869 /* Indicate a string param value */
1871 *(char_buf + offset) = 1;
1874 /* Dump param value */
1875 offset += qed_dump_str(char_buf + offset, dump, param_val);
1877 /* Align buffer to next dword */
1878 offset += qed_dump_align(char_buf + offset, dump, offset);
1880 return BYTES_TO_DWORDS(offset);
1883 /* Writes the specified numeric param to the specified buffer.
1884 * Returns the dumped size in dwords.
1886 static u32 qed_dump_num_param(u32 *dump_buf,
1887 bool dump, const char *param_name, u32 param_val)
1889 char *char_buf = (char *)dump_buf;
1892 /* Dump param name */
1893 offset += qed_dump_str(char_buf + offset, dump, param_name);
1895 /* Indicate a numeric param value */
1897 *(char_buf + offset) = 0;
1900 /* Align buffer to next dword */
1901 offset += qed_dump_align(char_buf + offset, dump, offset);
1903 /* Dump param value (and change offset from bytes to dwords) */
1904 offset = BYTES_TO_DWORDS(offset);
1906 *(dump_buf + offset) = param_val;
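	/* Together with qed_dump_str_param() above, this produces the
	 * following on-buffer layout (sketch):
	 *
	 *	"param-name\0", type byte (1 = string, 0 = numeric), then
	 *	either the value string + "\0" or zero padding up to the next
	 *	dword boundary followed by a single value dword.
	 */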
1912 /* Reads the FW version and writes it as a param to the specified buffer.
1913 * Returns the dumped size in dwords.
1915 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1916 struct qed_ptt *p_ptt,
1917 u32 *dump_buf, bool dump)
1919 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1920 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1921 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1922 struct fw_info fw_info = { {0}, {0} };
1925 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1926 /* Read FW image/version from PRAM in a non-reset SEMI */
1930 for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1932 struct storm_defs *storm = &s_storm_defs[storm_id];
1934 /* Read FW version/image */
1935 if (dev_data->block_in_reset[storm->block_id])
1938 /* Read FW info for the current Storm */
1939 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
1941 /* Create FW version/image strings */
1942 if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1943 "%d_%d_%d_%d", fw_info.ver.num.major,
1944 fw_info.ver.num.minor, fw_info.ver.num.rev,
1945 fw_info.ver.num.eng) < 0)
1947 "Unexpected debug error: invalid FW version string\n");
1948 switch (fw_info.ver.image_id) {
1950 strcpy(fw_img_str, "main");
1953 strcpy(fw_img_str, "unknown");
1961 /* Dump FW version, image and timestamp */
1962 offset += qed_dump_str_param(dump_buf + offset,
1963 dump, "fw-version", fw_ver_str);
1964 offset += qed_dump_str_param(dump_buf + offset,
1965 dump, "fw-image", fw_img_str);
1966 offset += qed_dump_num_param(dump_buf + offset,
1968 "fw-timestamp", fw_info.ver.timestamp);
1973 /* Reads the MFW version and writes it as a param to the specified buffer.
1974 * Returns the dumped size in dwords.
1976 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1977 struct qed_ptt *p_ptt,
1978 u32 *dump_buf, bool dump)
1980 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1983 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1984 u32 global_section_offsize, global_section_addr, mfw_ver;
1985 u32 public_data_addr, global_section_offsize_addr;
1987 /* Find MCP public data GRC address. Needs to be ORed with
1988 * MCP_REG_SCRATCH due to a HW bug.
1990 public_data_addr = qed_rd(p_hwfn,
1992 MISC_REG_SHARED_MEM_ADDR) |
1995 /* Find MCP public global section offset */
1996 global_section_offsize_addr = public_data_addr +
1997 offsetof(struct mcp_public_data,
1999 sizeof(offsize_t) * PUBLIC_GLOBAL;
2000 global_section_offsize = qed_rd(p_hwfn, p_ptt,
2001 global_section_offsize_addr);
2002 global_section_addr =
2004 (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2006 /* Read MFW version from MCP public global section */
2007 mfw_ver = qed_rd(p_hwfn, p_ptt,
2008 global_section_addr +
2009 offsetof(struct public_global, mfw_ver));
2011 /* Dump MFW version param */
2012 if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2013 (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2014 (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2016 "Unexpected debug error: invalid MFW version string\n");
2019 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2022 /* Writes a section header to the specified buffer.
2023 * Returns the dumped size in dwords.
2025 static u32 qed_dump_section_hdr(u32 *dump_buf,
2026 bool dump, const char *name, u32 num_params)
2028 return qed_dump_num_param(dump_buf, dump, name, num_params);
2031 /* Writes the common global params to the specified buffer.
2032 * Returns the dumped size in dwords.
2034 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2035 struct qed_ptt *p_ptt,
2038 u8 num_specific_global_params)
2040 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2044 /* Dump global params section header */
2045 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2046 offset += qed_dump_section_hdr(dump_buf + offset,
2047 dump, "global_params", num_params);
2050 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2051 offset += qed_dump_mfw_ver_param(p_hwfn,
2052 p_ptt, dump_buf + offset, dump);
2053 offset += qed_dump_num_param(dump_buf + offset,
2054 dump, "tools-version", TOOLS_VERSION);
2055 offset += qed_dump_str_param(dump_buf + offset,
2058 s_chip_defs[dev_data->chip_id].name);
2059 offset += qed_dump_str_param(dump_buf + offset,
2062 s_platform_defs[dev_data->platform_id].
2065 qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2071 /* Writes the "last" section (including CRC) to the specified buffer at the
2072 * given offset. Returns the dumped size in dwords.
2074 static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
2075 u32 *dump_buf, u32 offset, bool dump)
2077 u32 start_offset = offset;
2079 /* Dump CRC section header */
2080 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2082 /* Calculate CRC32 and store it in the dword following the "last" section header */
2084 *(dump_buf + offset) = ~crc32(0xffffffff,
2086 DWORDS_TO_BYTES(offset));
2090 return offset - start_offset;
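/* Illustrative note: a consumer validating a dump can mirror the calculation
 * above - recompute ~crc32(0xffffffff, <buffer start>, bytes up to the CRC
 * dword), which covers everything including the "last" section header, and
 * compare the result with the final dword.
 */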
2093 /* Update blocks reset state */
2094 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2095 struct qed_ptt *p_ptt)
2097 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2098 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2101 /* Read reset registers */
2102 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2103 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2104 reg_val[i] = qed_rd(p_hwfn,
2105 p_ptt, s_reset_regs_defs[i].addr);
2107 /* Check if blocks are in reset */
2108 for (i = 0; i < MAX_BLOCK_ID; i++) {
2109 struct block_defs *block = s_block_defs[i];
2111 dev_data->block_in_reset[i] = block->has_reset_bit &&
2112 !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2116 /* Enable / disable the Debug block */
2117 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2118 struct qed_ptt *p_ptt, bool enable)
2120 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2123 /* Resets the Debug block */
2124 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2125 struct qed_ptt *p_ptt)
2127 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2128 struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2130 dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2131 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2133 old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2135 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2136 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
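	/* Given the convention used in qed_update_blocks_reset_state() below
	 * (a cleared bit means "in reset"), the first write asserts reset on
	 * the DBG block and the second restores the original value, pulsing
	 * the block through reset.
	 */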
2139 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2140 struct qed_ptt *p_ptt,
2141 enum dbg_bus_frame_modes mode)
2143 qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2146 /* Enable / disable Debug Bus clients according to the specified mask
2147 * (1 = enable, 0 = disable).
2149 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2150 struct qed_ptt *p_ptt, u32 client_mask)
2152 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2155 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2157 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2162 /* Get next element from modes tree buffer */
2163 ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2164 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2167 case INIT_MODE_OP_NOT:
2168 return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2169 case INIT_MODE_OP_OR:
2170 case INIT_MODE_OP_AND:
2171 arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2172 arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2173 return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2174 arg2) : (arg1 && arg2);
2176 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
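	/* Sketch of the encoding this walks (illustrative values): the modes
	 * tree is a prefix expression of bytes, where values below
	 * MAX_INIT_MODE_OPS are apparently operators and anything else is a
	 * mode leaf offset by MAX_INIT_MODE_OPS. For example:
	 *
	 *	{ INIT_MODE_OP_OR,
	 *	  MAX_INIT_MODE_OPS + MODE_BB,
	 *	  MAX_INIT_MODE_OPS + MODE_K2 }
	 *
	 * matches when either MODE_BB or MODE_K2 is enabled.
	 */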
2180 /* Returns true if the specified entity (indicated by GRC param) should be
2181 * included in the dump, false otherwise.
2183 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2184 enum dbg_grc_params grc_param)
2186 return qed_grc_get_param(p_hwfn, grc_param) > 0;
2189 /* Returns true if the specified Storm should be included in the dump, false
2192 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2193 enum dbg_storms storm)
2195 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2198 /* Returns true if the specified memory should be included in the dump, false
2201 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2202 enum block_id block_id, u8 mem_group_id)
2204 struct block_defs *block = s_block_defs[block_id];
2207 /* Check Storm match */
2208 if (block->associated_to_storm &&
2209 !qed_grc_is_storm_included(p_hwfn,
2210 (enum dbg_storms)block->storm_id))
2213 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2214 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2216 if (mem_group_id == big_ram->mem_group_id ||
2217 mem_group_id == big_ram->ram_mem_group_id)
2218 return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2221 switch (mem_group_id) {
2222 case MEM_GROUP_PXP_ILT:
2223 case MEM_GROUP_PXP_MEM:
2224 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2226 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2227 case MEM_GROUP_PBUF:
2228 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2229 case MEM_GROUP_CAU_MEM:
2230 case MEM_GROUP_CAU_SB:
2231 case MEM_GROUP_CAU_PI:
2232 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2233 case MEM_GROUP_QM_MEM:
2234 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2235 case MEM_GROUP_CFC_MEM:
2236 case MEM_GROUP_CONN_CFC_MEM:
2237 case MEM_GROUP_TASK_CFC_MEM:
2238 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
2239 case MEM_GROUP_IGU_MEM:
2240 case MEM_GROUP_IGU_MSIX:
2241 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2242 case MEM_GROUP_MULD_MEM:
2243 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2244 case MEM_GROUP_PRS_MEM:
2245 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2246 case MEM_GROUP_DMAE_MEM:
2247 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2248 case MEM_GROUP_TM_MEM:
2249 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2250 case MEM_GROUP_SDM_MEM:
2251 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2252 case MEM_GROUP_TDIF_CTX:
2253 case MEM_GROUP_RDIF_CTX:
2254 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2255 case MEM_GROUP_CM_MEM:
2256 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2258 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2264 /* Stalls or unstalls all Storms, according to the stall argument */
2265 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2266 struct qed_ptt *p_ptt, bool stall)
2271 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2272 if (!qed_grc_is_storm_included(p_hwfn,
2273 (enum dbg_storms)storm_id))
2276 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2277 SEM_FAST_REG_STALL_0_BB_K2;
2278 qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2281 msleep(STALL_DELAY_MS);
2284 /* Takes all blocks out of reset */
2285 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2286 struct qed_ptt *p_ptt)
2288 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2289 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2292 /* Fill reset regs values */
2293 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2294 struct block_defs *block = s_block_defs[block_id];
2296 if (block->has_reset_bit && block->unreset)
2297 reg_val[block->reset_reg] |=
2298 BIT(block->reset_bit_offset);
2301 /* Write reset registers */
2302 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2303 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2306 reg_val[i] |= s_reset_regs_defs[i].unreset_val;
2311 s_reset_regs_defs[i].addr +
2312 RESET_REG_UNRESET_OFFSET, reg_val[i]);
2316 /* Returns the attention block data of the specified block */
2317 static const struct dbg_attn_block_type_data *
2318 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2320 const struct dbg_attn_block *base_attn_block_arr =
2321 (const struct dbg_attn_block *)
2322 s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2324 return &base_attn_block_arr[block_id].per_type_data[attn_type];
2327 /* Returns the attention registers of the specified block */
2328 static const struct dbg_attn_reg *
2329 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2332 const struct dbg_attn_block_type_data *block_type_data =
2333 qed_get_block_attn_data(block_id, attn_type);
2335 *num_attn_regs = block_type_data->num_regs;
2337 return &((const struct dbg_attn_reg *)
2338 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2342 /* For each block, clear the status of all parities */
2343 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2344 struct qed_ptt *p_ptt)
2346 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2347 const struct dbg_attn_reg *attn_reg_arr;
2348 u8 reg_idx, num_attn_regs;
2351 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2352 if (dev_data->block_in_reset[block_id])
2355 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2359 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2360 const struct dbg_attn_reg *reg_data =
2361 &attn_reg_arr[reg_idx];
2362 u16 modes_buf_offset;
2366 eval_mode = GET_FIELD(reg_data->mode.data,
2367 DBG_MODE_HDR_EVAL_MODE) > 0;
2369 GET_FIELD(reg_data->mode.data,
2370 DBG_MODE_HDR_MODES_BUF_OFFSET);
2372 /* If Mode match: clear parity status */
2374 qed_is_mode_match(p_hwfn, &modes_buf_offset))
2375 qed_rd(p_hwfn, p_ptt,
2376 DWORDS_TO_BYTES(reg_data->
2382 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2383 * The following parameters are dumped:
2384 * - count: no. of dumped entries
2385 * - split: split type
2386 * - id: split ID (dumped only if split_id >= 0)
2387 * - param_name: user parameter value (dumped only if param_name != NULL
2388 * and param_val != NULL).
2390 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2392 u32 num_reg_entries,
2393 const char *split_type,
2395 const char *param_name, const char *param_val)
2397 u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2400 offset += qed_dump_section_hdr(dump_buf + offset,
2401 dump, "grc_regs", num_params);
2402 offset += qed_dump_num_param(dump_buf + offset,
2403 dump, "count", num_reg_entries);
2404 offset += qed_dump_str_param(dump_buf + offset,
2405 dump, "split", split_type);
2407 offset += qed_dump_num_param(dump_buf + offset,
2408 dump, "id", split_id);
2409 if (param_name && param_val)
2410 offset += qed_dump_str_param(dump_buf + offset,
2411 dump, param_name, param_val);
2416 /* Dumps the GRC registers in the specified address range.
2417 * Returns the dumped size in dwords.
2418 * The addr and len arguments are specified in dwords.
2420 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2421 struct qed_ptt *p_ptt,
2423 bool dump, u32 addr, u32 len, bool wide_bus)
2425 u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2430 for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2431 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
2436 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2437 * The addr and len arguments are specified in dwords.
2439 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2440 bool dump, u32 addr, u32 len)
2443 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
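/* Hypothetical decode sketch for the entry header written above: a parsing
 * tool recovers the length from the bits at REG_DUMP_LEN_SHIFT and above,
 * and the dword address from the bits below it (the encoding relies on
 * addresses fitting below REG_DUMP_LEN_SHIFT).
 */
static void qed_grc_parse_reg_entry_hdr_example(u32 hdr, u32 *addr, u32 *len)
{
	*len = hdr >> REG_DUMP_LEN_SHIFT;
	*addr = hdr & (BIT(REG_DUMP_LEN_SHIFT) - 1);
}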
2448 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2449 * The addr and len arguments are specified in dwords.
2451 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2452 struct qed_ptt *p_ptt,
2454 bool dump, u32 addr, u32 len, bool wide_bus)
2458 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2459 offset += qed_grc_dump_addr_range(p_hwfn,
2462 dump, addr, len, wide_bus);
2467 /* Dumps GRC registers sequence with skip cycle.
2468 * Returns the dumped size in dwords.
2469 * - addr: start GRC address in dwords
2470 * - total_len: total no. of dwords to dump
2471 * - read_len: no. consecutive dwords to read
2472 * - skip_len: no. of dwords to skip (and fill with zeros)
2474 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2475 struct qed_ptt *p_ptt,
2480 u32 read_len, u32 skip_len)
2482 u32 offset = 0, reg_offset = 0;
2484 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2487 return offset + total_len;
2489 while (reg_offset < total_len) {
2490 u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2492 offset += qed_grc_dump_addr_range(p_hwfn,
2495 dump, addr, curr_len, false);
2496 reg_offset += curr_len;
2499 if (reg_offset < total_len) {
2500 curr_len = min_t(u32, skip_len, total_len - reg_offset);
2501 memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2503 reg_offset += curr_len;
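/* Worked example of the read/skip cycle above: with read_len = 7 and
 * skip_len = 1 (the DIF debug-info layout dumped further below, where every
 * 8th register is skipped), dwords 0..6 are read, dword 7 is zero-filled,
 * dwords 8..14 are read, and so on, until total_len dwords have been
 * produced.
 */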
2511 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2512 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2513 struct qed_ptt *p_ptt,
2514 struct dbg_array input_regs_arr,
2517 bool block_enable[MAX_BLOCK_ID],
2518 u32 *num_dumped_reg_entries)
2520 u32 i, offset = 0, input_offset = 0;
2521 bool mode_match = true;
2523 *num_dumped_reg_entries = 0;
2525 while (input_offset < input_regs_arr.size_in_dwords) {
2526 const struct dbg_dump_cond_hdr *cond_hdr =
2527 (const struct dbg_dump_cond_hdr *)
2528 &input_regs_arr.ptr[input_offset++];
2529 u16 modes_buf_offset;
2532 /* Check mode/block */
2533 eval_mode = GET_FIELD(cond_hdr->mode.data,
2534 DBG_MODE_HDR_EVAL_MODE) > 0;
2537 GET_FIELD(cond_hdr->mode.data,
2538 DBG_MODE_HDR_MODES_BUF_OFFSET);
2539 mode_match = qed_is_mode_match(p_hwfn,
2543 if (!mode_match || !block_enable[cond_hdr->block_id]) {
2544 input_offset += cond_hdr->data_size;
2548 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2549 const struct dbg_dump_reg *reg =
2550 (const struct dbg_dump_reg *)
2551 &input_regs_arr.ptr[input_offset];
2555 addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2556 len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2557 wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2558 offset += qed_grc_dump_reg_entry(p_hwfn,
2565 (*num_dumped_reg_entries)++;
2572 /* Dumps one split of GRC registers (header + entries). Returns the dumped size in dwords. */
2573 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2574 struct qed_ptt *p_ptt,
2575 struct dbg_array input_regs_arr,
2578 bool block_enable[MAX_BLOCK_ID],
2579 const char *split_type_name,
2581 const char *param_name,
2582 const char *param_val)
2584 u32 num_dumped_reg_entries, offset;
2586 /* Calculate register dump header size (and skip it for now) */
2587 offset = qed_grc_dump_regs_hdr(dump_buf,
2591 split_id, param_name, param_val);
2593 /* Dump registers */
2594 offset += qed_grc_dump_regs_entries(p_hwfn,
2600 &num_dumped_reg_entries);
2602 /* Write register dump header */
2603 if (dump && num_dumped_reg_entries > 0)
2604 qed_grc_dump_regs_hdr(dump_buf,
2606 num_dumped_reg_entries,
2608 split_id, param_name, param_val);
2610 return num_dumped_reg_entries > 0 ? offset : 0;
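/* Note on the sequence above: the "grc_regs" header is emitted twice on
 * purpose. The first qed_grc_dump_regs_hdr() call only advances the offset
 * past the header (nothing is written yet); once the entries have been
 * dumped and the real entry count is known, the header is written at the
 * start of the section, so the "count" parameter seen by parsers is always
 * accurate.
 */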
2613 /* Dumps registers according to the input registers array. Returns the dumped size in dwords. */
2616 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2617 struct qed_ptt *p_ptt,
2620 bool block_enable[MAX_BLOCK_ID],
2621 const char *param_name, const char *param_val)
2623 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2624 struct chip_platform_defs *chip_platform;
2625 u32 offset = 0, input_offset = 0;
2626 struct chip_defs *chip;
2627 u8 port_id, pf_id, vf_id;
2630 chip = &s_chip_defs[dev_data->chip_id];
2631 chip_platform = &chip->per_platform[dev_data->platform_id];
2634 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
2636 while (input_offset <
2637 s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2638 const struct dbg_dump_split_hdr *split_hdr;
2639 struct dbg_array curr_input_regs_arr;
2640 u32 split_data_size;
2644 (const struct dbg_dump_split_hdr *)
2645 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2647 GET_FIELD(split_hdr->hdr,
2648 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2650 GET_FIELD(split_hdr->hdr,
2651 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2652 curr_input_regs_arr.ptr =
2653 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2654 curr_input_regs_arr.size_in_dwords = split_data_size;
2656 switch (split_type_id) {
2657 case SPLIT_TYPE_NONE:
2658 offset += qed_grc_dump_split_data(p_hwfn,
2660 curr_input_regs_arr,
2670 case SPLIT_TYPE_PORT:
2671 for (port_id = 0; port_id < chip_platform->num_ports;
2674 qed_port_pretend(p_hwfn, p_ptt,
2677 qed_grc_dump_split_data(p_hwfn, p_ptt,
2678 curr_input_regs_arr,
2688 case SPLIT_TYPE_PORT_PF:
2689 for (pf_id = 0; pf_id < chip_platform->num_pfs;
2692 PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2695 fid = pf_id << pfid_shift;
2696 qed_fid_pretend(p_hwfn, p_ptt, fid);
2700 qed_grc_dump_split_data(p_hwfn,
2702 curr_input_regs_arr,
2714 for (vf_id = 0; vf_id < chip_platform->num_vfs;
2717 PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2719 PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2722 fid = BIT(vfvalid_shift) |
2723 (vf_id << vfid_shift);
2724 qed_fid_pretend(p_hwfn, p_ptt, fid);
2728 qed_grc_dump_split_data(p_hwfn, p_ptt,
2729 curr_input_regs_arr,
2742 input_offset += split_data_size;
2745 /* Pretend to original PF */
2747 fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2748 qed_fid_pretend(p_hwfn, p_ptt, fid);
2754 /* Dump reset registers. Returns the dumped size in dwords. */
2755 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2756 struct qed_ptt *p_ptt,
2757 u32 *dump_buf, bool dump)
2759 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2760 u32 i, offset = 0, num_regs = 0;
2762 /* Calculate header size */
2763 offset += qed_grc_dump_regs_hdr(dump_buf,
2764 false, 0, "eng", -1, NULL, NULL);
2766 /* Write reset registers */
2767 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2768 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2771 offset += qed_grc_dump_reg_entry(p_hwfn,
2776 (s_reset_regs_defs[i].addr), 1,
2783 qed_grc_dump_regs_hdr(dump_buf,
2784 true, num_regs, "eng", -1, NULL, NULL);
2789 /* Dump registers that are modified during GRC Dump and therefore must be
2790 * dumped first. Returns the dumped size in dwords.
2792 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2793 struct qed_ptt *p_ptt,
2794 u32 *dump_buf, bool dump)
2796 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2797 u32 block_id, offset = 0, num_reg_entries = 0;
2798 const struct dbg_attn_reg *attn_reg_arr;
2799 u8 storm_id, reg_idx, num_attn_regs;
2801 /* Calculate header size */
2802 offset += qed_grc_dump_regs_hdr(dump_buf,
2803 false, 0, "eng", -1, NULL, NULL);
2805 /* Write parity registers */
2806 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2807 if (dev_data->block_in_reset[block_id] && dump)
2810 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2814 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2815 const struct dbg_attn_reg *reg_data =
2816 &attn_reg_arr[reg_idx];
2817 u16 modes_buf_offset;
2822 eval_mode = GET_FIELD(reg_data->mode.data,
2823 DBG_MODE_HDR_EVAL_MODE) > 0;
2825 GET_FIELD(reg_data->mode.data,
2826 DBG_MODE_HDR_MODES_BUF_OFFSET);
2828 !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2831 /* Mode match: read & dump registers */
2832 addr = reg_data->mask_address;
2833 offset += qed_grc_dump_reg_entry(p_hwfn,
2839 addr = GET_FIELD(reg_data->data,
2840 DBG_ATTN_REG_STS_ADDRESS);
2841 offset += qed_grc_dump_reg_entry(p_hwfn,
2847 num_reg_entries += 2;
2851 /* Write Storm stall status registers */
2852 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2853 struct storm_defs *storm = &s_storm_defs[storm_id];
2856 if (dev_data->block_in_reset[storm->block_id] && dump)
2860 BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2861 SEM_FAST_REG_STALLED);
2862 offset += qed_grc_dump_reg_entry(p_hwfn,
2874 qed_grc_dump_regs_hdr(dump_buf,
2876 num_reg_entries, "eng", -1, NULL, NULL);
2881 /* Dumps registers that can't be represented in the debug arrays */
2882 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2883 struct qed_ptt *p_ptt,
2884 u32 *dump_buf, bool dump)
2886 u32 offset = 0, addr;
2888 offset += qed_grc_dump_regs_hdr(dump_buf,
2889 dump, 2, "eng", -1, NULL, NULL);
2891 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register should be skipped). */
2894 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2895 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2900 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2903 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2905 qed_grc_dump_reg_entry_skip(p_hwfn,
2910 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2917 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2918 * dwords. The following parameters are dumped:
2919 * - name: dumped only if it's not NULL.
2920 * - addr: in dwords, dumped only if name is NULL.
2921 * - len: in dwords, always dumped.
2922 * - width: dumped if it's not zero.
2923 * - packed: dumped only if it's not false.
2924 * - mem_group: always dumped.
2925 * - is_storm: true only if the memory is related to a Storm.
2926 * - storm_letter: valid only if is_storm is true.
2929 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2937 const char *mem_group,
2938 bool is_storm, char storm_letter)
2946 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2953 /* Dump section header */
2954 offset += qed_dump_section_hdr(dump_buf + offset,
2955 dump, "grc_mem", num_params);
2960 strcpy(buf, "?STORM_");
2961 buf[0] = storm_letter;
2962 strcpy(buf + strlen(buf), name);
2967 offset += qed_dump_str_param(dump_buf + offset,
2972 "Dumping %d registers from %s...\n",
2976 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2978 offset += qed_dump_num_param(dump_buf + offset,
2979 dump, "addr", addr_in_bytes);
2980 if (dump && len > 64)
2983 "Dumping %d registers from address 0x%x...\n",
2984 len, addr_in_bytes);
2988 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2990 /* Dump bit width */
2992 offset += qed_dump_num_param(dump_buf + offset,
2993 dump, "width", bit_width);
2997 offset += qed_dump_num_param(dump_buf + offset,
3002 strcpy(buf, "?STORM_");
3003 buf[0] = storm_letter;
3004 strcpy(buf + strlen(buf), mem_group);
3006 strcpy(buf, mem_group);
3009 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3014 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3015 * Returns the dumped size in dwords.
3016 * The addr and len arguments are specified in dwords.
3018 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3019 struct qed_ptt *p_ptt,
3028 const char *mem_group,
3029 bool is_storm, char storm_letter)
3033 offset += qed_grc_dump_mem_hdr(p_hwfn,
3041 mem_group, is_storm, storm_letter);
3042 offset += qed_grc_dump_addr_range(p_hwfn,
3045 dump, addr, len, wide_bus);
3050 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3051 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3052 struct qed_ptt *p_ptt,
3053 struct dbg_array input_mems_arr,
3054 u32 *dump_buf, bool dump)
3056 u32 i, offset = 0, input_offset = 0;
3057 bool mode_match = true;
3059 while (input_offset < input_mems_arr.size_in_dwords) {
3060 const struct dbg_dump_cond_hdr *cond_hdr;
3061 u16 modes_buf_offset;
3065 cond_hdr = (const struct dbg_dump_cond_hdr *)
3066 &input_mems_arr.ptr[input_offset++];
3067 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3069 /* Check required mode */
3070 eval_mode = GET_FIELD(cond_hdr->mode.data,
3071 DBG_MODE_HDR_EVAL_MODE) > 0;
3074 GET_FIELD(cond_hdr->mode.data,
3075 DBG_MODE_HDR_MODES_BUF_OFFSET);
3076 mode_match = qed_is_mode_match(p_hwfn,
3081 input_offset += cond_hdr->data_size;
3085 for (i = 0; i < num_entries;
3086 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3087 const struct dbg_dump_mem *mem =
3088 (const struct dbg_dump_mem *)
3089 &input_mems_arr.ptr[input_offset];
3090 u8 mem_group_id = GET_FIELD(mem->dword0,
3091 DBG_DUMP_MEM_MEM_GROUP_ID);
3092 bool is_storm = false, mem_wide_bus;
3093 enum dbg_grc_params grc_param;
3094 char storm_letter = 'a';
3095 enum block_id block_id;
3096 u32 mem_addr, mem_len;
3098 if (mem_group_id >= MEM_GROUPS_NUM) {
3099 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3103 block_id = (enum block_id)cond_hdr->block_id;
3104 if (!qed_grc_is_mem_included(p_hwfn,
3109 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3110 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3111 mem_wide_bus = GET_FIELD(mem->dword1,
3112 DBG_DUMP_MEM_WIDE_BUS);
3114 /* Update memory length for CCFC/TCFC memories
3115 * according to number of LCIDs/LTIDs.
3117 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3118 if (mem_len % MAX_LCIDS) {
3120 "Invalid CCFC connection memory size\n");
3124 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3125 mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3126 (mem_len / MAX_LCIDS);
3127 } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3128 if (mem_len % MAX_LTIDS) {
3130 "Invalid TCFC task memory size\n");
3134 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3135 mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3136 (mem_len / MAX_LTIDS);
3139 /* If the memory is associated with a Storm, update the Storm details. */
3143 [cond_hdr->block_id]->associated_to_storm) {
3146 s_storm_defs[s_block_defs
3147 [cond_hdr->block_id]->
3152 offset += qed_grc_dump_mem(p_hwfn,
3162 s_mem_group_names[mem_group_id],
3171 /* Dumps GRC memories according to the input array dump_mem.
3172 * Returns the dumped size in dwords.
3174 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3175 struct qed_ptt *p_ptt,
3176 u32 *dump_buf, bool dump)
3178 u32 offset = 0, input_offset = 0;
3180 while (input_offset <
3181 s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3182 const struct dbg_dump_split_hdr *split_hdr;
3183 struct dbg_array curr_input_mems_arr;
3184 u32 split_data_size;
3187 split_hdr = (const struct dbg_dump_split_hdr *)
3188 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3190 GET_FIELD(split_hdr->hdr,
3191 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3193 GET_FIELD(split_hdr->hdr,
3194 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3195 curr_input_mems_arr.ptr =
3196 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3197 curr_input_mems_arr.size_in_dwords = split_data_size;
3199 switch (split_type_id) {
3200 case SPLIT_TYPE_NONE:
3201 offset += qed_grc_dump_mem_entries(p_hwfn,
3203 curr_input_mems_arr,
3210 "Dumping split memories is currently not supported\n");
3214 input_offset += split_data_size;
3220 /* Dumps GRC context data for the specified Storm.
3221 * Returns the dumped size in dwords.
3222 * The lid_size argument is specified in quad-regs.
3224 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3225 struct qed_ptt *p_ptt,
3234 struct storm_defs *storm = &s_storm_defs[storm_id];
3235 u32 i, lid, total_size, offset = 0;
3240 lid_size *= BYTES_IN_DWORD;
3241 total_size = num_lids * lid_size;
3243 offset += qed_grc_dump_mem_hdr(p_hwfn,
3250 false, name, true, storm->letter);
3253 return offset + total_size;
3255 /* Dump context data */
3256 for (lid = 0; lid < num_lids; lid++) {
3257 for (i = 0; i < lid_size; i++, offset++) {
3259 p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3260 *(dump_buf + offset) = qed_rd(p_hwfn,
3261 p_ptt, rd_reg_addr);
3268 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3269 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3270 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3272 enum dbg_grc_params grc_param;
3276 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3277 struct storm_defs *storm = &s_storm_defs[storm_id];
3279 if (!qed_grc_is_storm_included(p_hwfn,
3280 (enum dbg_storms)storm_id))
3283 /* Dump Conn AG context size */
3284 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3286 qed_grc_dump_ctx_data(p_hwfn,
3291 qed_grc_get_param(p_hwfn,
3293 storm->cm_conn_ag_ctx_lid_size,
3294 storm->cm_conn_ag_ctx_rd_addr,
3297 /* Dump Conn ST context size */
3298 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3300 qed_grc_dump_ctx_data(p_hwfn,
3305 qed_grc_get_param(p_hwfn,
3307 storm->cm_conn_st_ctx_lid_size,
3308 storm->cm_conn_st_ctx_rd_addr,
3311 /* Dump Task AG context size */
3312 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3314 qed_grc_dump_ctx_data(p_hwfn,
3319 qed_grc_get_param(p_hwfn,
3321 storm->cm_task_ag_ctx_lid_size,
3322 storm->cm_task_ag_ctx_rd_addr,
3325 /* Dump Task ST context size */
3326 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3328 qed_grc_dump_ctx_data(p_hwfn,
3333 qed_grc_get_param(p_hwfn,
3335 storm->cm_task_st_ctx_lid_size,
3336 storm->cm_task_st_ctx_rd_addr,
3343 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3344 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3345 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3347 char buf[10] = "IOR_SET_?";
3348 u32 addr, offset = 0;
3349 u8 storm_id, set_id;
3351 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3352 struct storm_defs *storm = &s_storm_defs[storm_id];
3354 if (!qed_grc_is_storm_included(p_hwfn,
3355 (enum dbg_storms)storm_id))
3358 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3359 addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3360 SEM_FAST_REG_STORM_REG_FILE) +
3361 IOR_SET_OFFSET(set_id);
3362 buf[strlen(buf) - 1] = '0' + set_id;
3363 offset += qed_grc_dump_mem(p_hwfn,
3382 /* Dump VFC CAM. Returns the dumped size in dwords. */
3383 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3384 struct qed_ptt *p_ptt,
3385 u32 *dump_buf, bool dump, u8 storm_id)
3387 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3388 struct storm_defs *storm = &s_storm_defs[storm_id];
3389 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3390 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3391 u32 row, i, offset = 0;
3393 offset += qed_grc_dump_mem_hdr(p_hwfn,
3400 false, "vfc_cam", true, storm->letter);
3403 return offset + total_size;
3405 /* Prepare CAM address */
3406 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3408 for (row = 0; row < VFC_CAM_NUM_ROWS;
3409 row++, offset += VFC_CAM_RESP_DWORDS) {
3410 /* Write VFC CAM command */
3411 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3414 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3415 cam_cmd, VFC_CAM_CMD_DWORDS);
3417 /* Write VFC CAM address */
3420 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3421 cam_addr, VFC_CAM_ADDR_DWORDS);
3423 /* Read VFC CAM read response */
3426 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3427 dump_buf + offset, VFC_CAM_RESP_DWORDS);
3433 /* Dump VFC RAM. Returns the dumped size in dwords. */
3434 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3435 struct qed_ptt *p_ptt,
3438 u8 storm_id, struct vfc_ram_defs *ram_defs)
3440 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3441 struct storm_defs *storm = &s_storm_defs[storm_id];
3442 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3443 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3444 u32 row, i, offset = 0;
3446 offset += qed_grc_dump_mem_hdr(p_hwfn,
3454 ram_defs->type_name,
3455 true, storm->letter);
3457 /* Prepare RAM address */
3458 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3461 return offset + total_size;
3463 for (row = ram_defs->base_row;
3464 row < ram_defs->base_row + ram_defs->num_rows;
3465 row++, offset += VFC_RAM_RESP_DWORDS) {
3466 /* Write VFC RAM command */
3469 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3470 ram_cmd, VFC_RAM_CMD_DWORDS);
3472 /* Write VFC RAM address */
3473 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3476 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3477 ram_addr, VFC_RAM_ADDR_DWORDS);
3479 /* Read VFC RAM read response */
3482 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3483 dump_buf + offset, VFC_RAM_RESP_DWORDS);
3489 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3490 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3491 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3493 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3497 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3498 if (!qed_grc_is_storm_included(p_hwfn,
3499 (enum dbg_storms)storm_id) ||
3500 !s_storm_defs[storm_id].has_vfc ||
3501 (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3506 offset += qed_grc_dump_vfc_cam(p_hwfn,
3512 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3513 offset += qed_grc_dump_vfc_ram(p_hwfn,
3518 &s_vfc_ram_defs[i]);
3524 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3525 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3526 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3528 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3532 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3533 u32 rss_addr, num_entries, entry_width, total_dwords, i;
3534 struct rss_mem_defs *rss_defs;
3538 rss_defs = &s_rss_mem_defs[rss_mem_id];
3539 rss_addr = rss_defs->addr;
3540 num_entries = rss_defs->num_entries[dev_data->chip_id];
3541 entry_width = rss_defs->entry_width[dev_data->chip_id];
3542 total_dwords = (num_entries * entry_width) / 32;
3543 packed = (entry_width == 16);
3545 offset += qed_grc_dump_mem_hdr(p_hwfn,
3553 rss_defs->type_name, false, 0);
3557 offset += total_dwords;
3561 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3562 size = RSS_REG_RSS_RAM_DATA_SIZE;
3563 for (i = 0; i < total_dwords; i += size, rss_addr++) {
3564 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3565 offset += qed_grc_dump_addr_range(p_hwfn,
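/* Minimal sketch of the indirect access pattern used above (assumption: once
 * a row index is written to RSS_REG_RSS_RAM_ADDR, that row becomes readable
 * as RSS_REG_RSS_RAM_DATA_SIZE consecutive dwords at RSS_REG_RSS_RAM_DATA).
 * The helper name is hypothetical.
 */
static void qed_rss_ram_read_row_example(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 row, u32 *out_buf)
{
	u32 i;

	qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, row);
	for (i = 0; i < RSS_REG_RSS_RAM_DATA_SIZE; i++)
		out_buf[i] = qed_rd(p_hwfn, p_ptt,
				    RSS_REG_RSS_RAM_DATA +
				    DWORDS_TO_BYTES(i));
}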
3578 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3579 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3580 struct qed_ptt *p_ptt,
3581 u32 *dump_buf, bool dump, u8 big_ram_id)
3583 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3584 u32 total_blocks, ram_size, offset = 0, i;
3585 char mem_name[12] = "???_BIG_RAM";
3586 char type_name[8] = "???_RAM";
3587 struct big_ram_defs *big_ram;
3589 big_ram = &s_big_ram_defs[big_ram_id];
3590 total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
3591 ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
3593 strncpy(type_name, big_ram->instance_name,
3594 strlen(big_ram->instance_name));
3595 strncpy(mem_name, big_ram->instance_name,
3596 strlen(big_ram->instance_name));
3598 /* Dump memory header */
3599 offset += qed_grc_dump_mem_hdr(p_hwfn,
3605 BIG_RAM_BLOCK_SIZE_BYTES * 8,
3606 false, type_name, false, 0);
3608 /* Read and dump Big RAM data */
3610 return offset + ram_size;
3613 for (i = 0; i < total_blocks / 2; i++) {
3616 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3617 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3618 len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
3619 offset += qed_grc_dump_addr_range(p_hwfn,
3631 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3632 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3634 bool block_enable[MAX_BLOCK_ID] = { 0 };
3635 u32 offset = 0, addr;
3636 bool halted = false;
3639 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3640 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3642 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3645 /* Dump MCP scratchpad */
3646 offset += qed_grc_dump_mem(p_hwfn,
3651 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3652 MCP_REG_SCRATCH_SIZE,
3653 false, 0, false, "MCP", false, 0);
3655 /* Dump MCP cpu_reg_file */
3656 offset += qed_grc_dump_mem(p_hwfn,
3661 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3662 MCP_REG_CPU_REG_FILE_SIZE,
3663 false, 0, false, "MCP", false, 0);
3665 /* Dump MCP registers */
3666 block_enable[BLOCK_MCP] = true;
3667 offset += qed_grc_dump_registers(p_hwfn,
3670 dump, block_enable, "block", "MCP");
3672 /* Dump required non-MCP registers */
3673 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3674 dump, 1, "eng", -1, "block", "MCP");
3675 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3676 offset += qed_grc_dump_reg_entry(p_hwfn,
3685 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3686 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3691 /* Dumps the tbus indirect memory for all PHYs. */
3692 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3693 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3695 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3699 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3700 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3701 struct phy_defs *phy_defs;
3704 phy_defs = &s_phy_defs[phy_id];
3705 addr_lo_addr = phy_defs->base_addr +
3706 phy_defs->tbus_addr_lo_addr;
3707 addr_hi_addr = phy_defs->base_addr +
3708 phy_defs->tbus_addr_hi_addr;
3709 data_lo_addr = phy_defs->base_addr +
3710 phy_defs->tbus_data_lo_addr;
3711 data_hi_addr = phy_defs->base_addr +
3712 phy_defs->tbus_data_hi_addr;
3713 bytes_buf = (u8 *)(dump_buf + offset);
3715 if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3716 phy_defs->phy_name) < 0)
3718 "Unexpected debug error: invalid PHY memory name\n");
3720 offset += qed_grc_dump_mem_hdr(p_hwfn,
3725 PHY_DUMP_SIZE_DWORDS,
3726 16, true, mem_name, false, 0);
3729 offset += PHY_DUMP_SIZE_DWORDS;
3733 for (tbus_hi_offset = 0;
3734 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3736 qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3737 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3740 p_ptt, addr_lo_addr, tbus_lo_offset);
3741 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3744 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3750 offset += PHY_DUMP_SIZE_DWORDS;
3756 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3757 struct qed_ptt *p_ptt,
3758 enum block_id block_id,
3762 u8 force_valid_mask, u8 force_frame_mask)
3764 struct block_defs *block = s_block_defs[block_id];
3766 qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3767 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3768 qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3769 qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3770 qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
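/* Usage sketch (hypothetical helper, mirroring the static debug loop below):
 * select one debug line of a block with enable mask 0xf and no shifting or
 * forcing, then read the resulting STATIC_DEBUG_LINE_DWORDS dwords from
 * DBG_REG_CALENDAR_OUT_DATA.
 */
static void qed_read_static_dbg_line_example(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     enum block_id block_id,
					     u8 line_id, u32 *out_buf)
{
	u32 i;

	qed_config_dbg_line(p_hwfn, p_ptt, block_id, line_id, 0xf, 0, 0, 0);

	for (i = 0; i < STATIC_DEBUG_LINE_DWORDS; i++)
		out_buf[i] = qed_rd(p_hwfn, p_ptt,
				    DBG_REG_CALENDAR_OUT_DATA +
				    DWORDS_TO_BYTES(i));
}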
3773 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3774 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3775 struct qed_ptt *p_ptt,
3776 u32 *dump_buf, bool dump)
3778 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3779 u32 block_id, line_id, offset = 0;
3781 /* Skip static debug if a debug bus recording is in progress */
3782 if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3787 QED_MSG_DEBUG, "Dumping static debug data...\n");
3789 /* Disable all blocks debug output */
3790 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3791 struct block_defs *block = s_block_defs[block_id];
3793 if (block->has_dbg_bus[dev_data->chip_id])
3794 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3798 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3799 qed_bus_set_framing_mode(p_hwfn,
3800 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3802 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3803 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3804 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3807 /* Dump all static debug lines for each relevant block */
3808 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3809 struct block_defs *block = s_block_defs[block_id];
3810 struct dbg_bus_block *block_desc;
3811 u32 block_dwords, addr, len;
3814 if (!block->has_dbg_bus[dev_data->chip_id])
3818 get_dbg_bus_block_desc(p_hwfn,
3819 (enum block_id)block_id);
3820 block_dwords = NUM_DBG_LINES(block_desc) *
3821 STATIC_DEBUG_LINE_DWORDS;
3823 /* Dump static section params */
3824 offset += qed_grc_dump_mem_hdr(p_hwfn,
3830 32, false, "STATIC", false, 0);
3833 offset += block_dwords;
3837 /* If all lines are invalid - dump zeros */
3838 if (dev_data->block_in_reset[block_id]) {
3839 memset(dump_buf + offset, 0,
3840 DWORDS_TO_BYTES(block_dwords));
3841 offset += block_dwords;
3845 /* Enable block's client */
3846 dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3847 qed_bus_enable_clients(p_hwfn,
3849 BIT(dbg_client_id));
3851 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3852 len = STATIC_DEBUG_LINE_DWORDS;
3853 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3855 /* Configure debug line ID */
3856 qed_config_dbg_line(p_hwfn,
3858 (enum block_id)block_id,
3859 (u8)line_id, 0xf, 0, 0, 0);
3861 /* Read debug line info */
3862 offset += qed_grc_dump_addr_range(p_hwfn,
3871 /* Disable block's client and debug output */
3872 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3873 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3877 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3878 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3884 /* Performs GRC Dump to the specified buffer.
3885 * Returns the dumped size in dwords.
3887 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3888 struct qed_ptt *p_ptt,
3890 bool dump, u32 *num_dumped_dwords)
3892 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3893 bool parities_masked = false;
3894 u8 i, port_mode = 0;
3897 *num_dumped_dwords = 0;
3900 /* Find port mode */
3901 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3913 /* Update reset state */
3914 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3917 /* Dump global params */
3918 offset += qed_dump_common_global_params(p_hwfn,
3920 dump_buf + offset, dump, 4);
3921 offset += qed_dump_str_param(dump_buf + offset,
3922 dump, "dump-type", "grc-dump");
3923 offset += qed_dump_num_param(dump_buf + offset,
3926 qed_grc_get_param(p_hwfn,
3927 DBG_GRC_PARAM_NUM_LCIDS));
3928 offset += qed_dump_num_param(dump_buf + offset,
3931 qed_grc_get_param(p_hwfn,
3932 DBG_GRC_PARAM_NUM_LTIDS));
3933 offset += qed_dump_num_param(dump_buf + offset,
3934 dump, "num-ports", port_mode);
3936 /* Dump reset registers (dumped before taking blocks out of reset) */
3937 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3938 offset += qed_grc_dump_reset_regs(p_hwfn,
3940 dump_buf + offset, dump);
3942 /* Take all blocks out of reset (using reset registers) */
3944 qed_grc_unreset_blocks(p_hwfn, p_ptt);
3945 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3948 /* Disable all parities using MFW command */
3950 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3951 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3952 if (!parities_masked) {
3954 "Failed to mask parities using MFW\n");
3955 if (qed_grc_get_param
3956 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3957 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3961 /* Dump modified registers (dumped before modifying them) */
3962 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3963 offset += qed_grc_dump_modified_regs(p_hwfn,
3965 dump_buf + offset, dump);
3969 (qed_grc_is_included(p_hwfn,
3970 DBG_GRC_PARAM_DUMP_IOR) ||
3971 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3972 qed_grc_stall_storms(p_hwfn, p_ptt, true);
3975 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3976 bool block_enable[MAX_BLOCK_ID];
3978 /* Dump all blocks except MCP */
3979 for (i = 0; i < MAX_BLOCK_ID; i++)
3980 block_enable[i] = true;
3981 block_enable[BLOCK_MCP] = false;
3982 offset += qed_grc_dump_registers(p_hwfn,
3987 block_enable, NULL, NULL);
3989 /* Dump special registers */
3990 offset += qed_grc_dump_special_regs(p_hwfn,
3992 dump_buf + offset, dump);
3996 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3999 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4000 offset += qed_grc_dump_mcp(p_hwfn,
4001 p_ptt, dump_buf + offset, dump);
4004 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4005 offset += qed_grc_dump_ctx(p_hwfn,
4006 p_ptt, dump_buf + offset, dump);
4008 /* Dump RSS memories */
4009 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4010 offset += qed_grc_dump_rss(p_hwfn,
4011 p_ptt, dump_buf + offset, dump);
4014 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4015 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4016 offset += qed_grc_dump_big_ram(p_hwfn,
4022 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4023 offset += qed_grc_dump_iors(p_hwfn,
4024 p_ptt, dump_buf + offset, dump);
4027 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4028 offset += qed_grc_dump_vfc(p_hwfn,
4029 p_ptt, dump_buf + offset, dump);
4032 if (qed_grc_is_included(p_hwfn,
4033 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4034 CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4035 offset += qed_grc_dump_phy(p_hwfn,
4036 p_ptt, dump_buf + offset, dump);
4038 /* Dump static debug data */
4039 if (qed_grc_is_included(p_hwfn,
4040 DBG_GRC_PARAM_DUMP_STATIC) &&
4041 dev_data->bus.state == DBG_BUS_STATE_IDLE)
4042 offset += qed_grc_dump_static_debug(p_hwfn,
4044 dump_buf + offset, dump);
4046 /* Dump last section */
4047 offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4050 /* Unstall storms */
4051 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4052 qed_grc_stall_storms(p_hwfn, p_ptt, false);
4054 /* Clear parity status */
4055 qed_grc_clear_all_prty(p_hwfn, p_ptt);
4057 /* Enable all parities using MFW command */
4058 if (parities_masked)
4059 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4062 *num_dumped_dwords = offset;
4064 return DBG_STATUS_OK;
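/* Usage sketch (hypothetical helper; not part of the original flow): the
 * dump is typically driven in two passes, first with a NULL buffer and
 * dump=false to learn the required size, then with a real buffer and
 * dump=true. This assumes, as the helpers above do, that nothing is written
 * to the buffer when dump is false; DBG_STATUS_DUMP_BUF_TOO_SMALL is assumed
 * to be one of the driver's dbg_status codes.
 */
static enum dbg_status qed_grc_dump_two_pass_example(struct qed_hwfn *p_hwfn,
						     struct qed_ptt *p_ptt,
						     u32 *dump_buf,
						     u32 buf_size_in_dwords,
						     u32 *num_dumped_dwords)
{
	u32 needed_dwords = 0;
	enum dbg_status status;

	/* Pass 1: compute the required size only */
	status = qed_grc_dump(p_hwfn, p_ptt, NULL, false, &needed_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	if (buf_size_in_dwords < needed_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Pass 2: perform the actual dump */
	return qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
}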
4067 /* Writes the specified failing Idle Check rule to the specified buffer.
4068 * Returns the dumped size in dwords.
4070 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4071 struct qed_ptt *p_ptt,
4076 const struct dbg_idle_chk_rule *rule,
4077 u16 fail_entry_id, u32 *cond_reg_values)
4079 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4080 const struct dbg_idle_chk_cond_reg *cond_regs;
4081 const struct dbg_idle_chk_info_reg *info_regs;
4082 u32 i, next_reg_offset = 0, offset = 0;
4083 struct dbg_idle_chk_result_hdr *hdr;
4084 const union dbg_idle_chk_reg *regs;
4087 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4088 regs = &((const union dbg_idle_chk_reg *)
4089 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4090 cond_regs = &regs[0].cond_reg;
4091 info_regs = &regs[rule->num_cond_regs].info_reg;
4093 /* Dump rule data */
4095 memset(hdr, 0, sizeof(*hdr));
4096 hdr->rule_id = rule_id;
4097 hdr->mem_entry_id = fail_entry_id;
4098 hdr->severity = rule->severity;
4099 hdr->num_dumped_cond_regs = rule->num_cond_regs;
4102 offset += IDLE_CHK_RESULT_HDR_DWORDS;
4104 /* Dump condition register values */
4105 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4106 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4107 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4109 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4110 (dump_buf + offset);
4112 /* Write register header */
4114 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4119 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4120 memset(reg_hdr, 0, sizeof(*reg_hdr));
4121 reg_hdr->start_entry = reg->start_entry;
4122 reg_hdr->size = reg->entry_size;
4123 SET_FIELD(reg_hdr->data,
4124 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4125 reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4126 SET_FIELD(reg_hdr->data,
4127 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4129 /* Write register values */
4130 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4131 dump_buf[offset] = cond_reg_values[next_reg_offset];
4134 /* Dump info register values */
4135 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4136 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4139 /* Check if register's block is in reset */
4141 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4145 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4146 if (block_id >= MAX_BLOCK_ID) {
4147 DP_NOTICE(p_hwfn, "Invalid block_id\n");
4151 if (!dev_data->block_in_reset[block_id]) {
4152 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4153 bool wide_bus, eval_mode, mode_match = true;
4154 u16 modes_buf_offset;
4157 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4158 (dump_buf + offset);
4161 eval_mode = GET_FIELD(reg->mode.data,
4162 DBG_MODE_HDR_EVAL_MODE) > 0;
4165 GET_FIELD(reg->mode.data,
4166 DBG_MODE_HDR_MODES_BUF_OFFSET);
4168 qed_is_mode_match(p_hwfn,
4175 addr = GET_FIELD(reg->data,
4176 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4177 wide_bus = GET_FIELD(reg->data,
4178 DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4180 /* Write register header */
4181 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4182 hdr->num_dumped_info_regs++;
4183 memset(reg_hdr, 0, sizeof(*reg_hdr));
4184 reg_hdr->size = reg->size;
4185 SET_FIELD(reg_hdr->data,
4186 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4187 rule->num_cond_regs + reg_id);
4189 /* Write register values */
4190 offset += qed_grc_dump_addr_range(p_hwfn,
4195 reg->size, wide_bus);
4202 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4204 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4205 u32 *dump_buf, bool dump,
4206 const struct dbg_idle_chk_rule *input_rules,
4207 u32 num_input_rules, u32 *num_failing_rules)
4209 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4210 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4215 *num_failing_rules = 0;
4217 for (i = 0; i < num_input_rules; i++) {
4218 const struct dbg_idle_chk_cond_reg *cond_regs;
4219 const struct dbg_idle_chk_rule *rule;
4220 const union dbg_idle_chk_reg *regs;
4221 u16 num_reg_entries = 1;
4222 bool check_rule = true;
4223 const u32 *imm_values;
4225 rule = &input_rules[i];
4226 regs = &((const union dbg_idle_chk_reg *)
4227 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4229 cond_regs = &regs[0].cond_reg;
4230 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4233 /* Check if all condition register blocks are out of reset, and
4234 * find maximal number of entries (all condition registers that
4235 * are memories must have the same size, which is > 1).
4237 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4240 GET_FIELD(cond_regs[reg_id].data,
4241 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4243 if (block_id >= MAX_BLOCK_ID) {
4244 DP_NOTICE(p_hwfn, "Invalid block_id\n");
4248 check_rule = !dev_data->block_in_reset[block_id];
4249 if (cond_regs[reg_id].num_entries > num_reg_entries)
4250 num_reg_entries = cond_regs[reg_id].num_entries;
4253 if (!check_rule && dump)
4256 /* Go over all register entries (number of entries is the same
4257 * for all condition registers).
4259 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4260 u32 next_reg_offset = 0;
4263 offset += qed_idle_chk_dump_failure(p_hwfn,
4271 (*num_failing_rules)++;
4275 /* Read current entry of all condition registers */
4276 for (reg_id = 0; reg_id < rule->num_cond_regs;
4278 const struct dbg_idle_chk_cond_reg *reg =
4280 u32 padded_entry_size, addr;
4283 /* Find GRC address (if it's a memory, the
4284 * address of the specific entry is calculated).
4286 addr = GET_FIELD(reg->data,
4287 DBG_IDLE_CHK_COND_REG_ADDRESS);
4289 GET_FIELD(reg->data,
4290 DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4291 if (reg->num_entries > 1 ||
4292 reg->start_entry > 0) {
4294 reg->entry_size > 1 ?
4295 roundup_pow_of_two(reg->entry_size)
4297 addr += (reg->start_entry + entry_id) *
4301 /* Read registers */
4302 if (next_reg_offset + reg->entry_size >=
4303 IDLE_CHK_MAX_ENTRIES_SIZE) {
4305 "idle check registers entry is too large\n");
4310 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4318 /* Call rule condition function.
4319 * If it returns true, it's a failure.
4321 if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4323 offset += qed_idle_chk_dump_failure(p_hwfn,
4331 (*num_failing_rules)++;
4340 /* Performs Idle Check Dump to the specified buffer.
4341 * Returns the dumped size in dwords.
4343 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4344 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4346 u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4347 u32 num_failing_rules = 0;
4349 /* Dump global params */
4350 offset += qed_dump_common_global_params(p_hwfn,
4352 dump_buf + offset, dump, 1);
4353 offset += qed_dump_str_param(dump_buf + offset,
4354 dump, "dump-type", "idle-chk");
4356 /* Dump idle check section header with a single parameter */
4357 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4358 num_failing_rules_offset = offset;
4359 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4361 while (input_offset <
4362 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4363 const struct dbg_idle_chk_cond_hdr *cond_hdr =
4364 (const struct dbg_idle_chk_cond_hdr *)
4365 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4367 bool eval_mode, mode_match = true;
4368 u32 curr_failing_rules;
4369 u16 modes_buf_offset;
4372 eval_mode = GET_FIELD(cond_hdr->mode.data,
4373 DBG_MODE_HDR_EVAL_MODE) > 0;
4376 GET_FIELD(cond_hdr->mode.data,
4377 DBG_MODE_HDR_MODES_BUF_OFFSET);
4378 mode_match = qed_is_mode_match(p_hwfn,
4384 qed_idle_chk_dump_rule_entries(p_hwfn,
4388 (const struct dbg_idle_chk_rule *)
4389 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4391 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4392 &curr_failing_rules);
4393 num_failing_rules += curr_failing_rules;
4396 input_offset += cond_hdr->data_size;
4399 /* Overwrite num_rules parameter */
4401 qed_dump_num_param(dump_buf + num_failing_rules_offset,
4402 dump, "num_rules", num_failing_rules);
4404 /* Dump last section */
4405 offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4410 /* Finds the meta data image in NVRAM */
4411 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4412 struct qed_ptt *p_ptt,
4414 u32 *nvram_offset_bytes,
4415 u32 *nvram_size_bytes)
4417 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4418 struct mcp_file_att file_att;
4421 /* Call NVRAM get file command */
4422 nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4424 DRV_MSG_CODE_NVM_GET_FILE_ATT,
4428 &ret_txn_size, (u32 *)&file_att);
4430 /* Check response */
4432 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4433 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4435 /* Update return values */
4436 *nvram_offset_bytes = file_att.nvm_start_addr;
4437 *nvram_size_bytes = file_att.len;
4441 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4442 image_type, *nvram_offset_bytes, *nvram_size_bytes);
4444 /* Check alignment */
4445 if (*nvram_size_bytes & 0x3)
4446 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4448 return DBG_STATUS_OK;
4451 /* Reads data from NVRAM */
4452 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4453 struct qed_ptt *p_ptt,
4454 u32 nvram_offset_bytes,
4455 u32 nvram_size_bytes, u32 *ret_buf)
4457 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4458 s32 bytes_left = nvram_size_bytes;
4459 u32 read_offset = 0;
4463 "nvram_read: reading image of size %d bytes from NVRAM\n",
4469 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4471 /* Call NVRAM read command */
4472 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4473 DRV_MSG_CODE_NVM_READ_NVRAM,
4474 (nvram_offset_bytes +
4477 DRV_MB_PARAM_NVM_LEN_SHIFT),
4478 &ret_mcp_resp, &ret_mcp_param,
4480 (u32 *)((u8 *)ret_buf + read_offset)))
4481 return DBG_STATUS_NVRAM_READ_FAILED;
4483 /* Check response */
4484 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4485 return DBG_STATUS_NVRAM_READ_FAILED;
4487 /* Update read offset */
4488 read_offset += ret_read_size;
4489 bytes_left -= ret_read_size;
4490 } while (bytes_left > 0);
4492 return DBG_STATUS_OK;
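/* Usage sketch (hypothetical helper): locate an image by type and read it
 * with the chunked reader above. DBG_STATUS_DUMP_BUF_TOO_SMALL is assumed to
 * be one of the driver's dbg_status codes.
 */
static enum dbg_status qed_nvram_read_image_example(struct qed_hwfn *p_hwfn,
						    struct qed_ptt *p_ptt,
						    u32 image_type,
						    u32 *buf,
						    u32 buf_size_bytes)
{
	u32 nvram_offset_bytes, nvram_size_bytes;
	enum dbg_status status;

	status = qed_find_nvram_image(p_hwfn, p_ptt, image_type,
				      &nvram_offset_bytes, &nvram_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	if (nvram_size_bytes > buf_size_bytes)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	return qed_nvram_read(p_hwfn, p_ptt,
			      nvram_offset_bytes, nvram_size_bytes, buf);
}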
4495 /* Get info on the MCP Trace data in the scratchpad:
4496 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4497 * - trace_data_size (OUT): trace data size in bytes (without the header)
4499 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4500 struct qed_ptt *p_ptt,
4501 u32 *trace_data_grc_addr,
4502 u32 *trace_data_size)
4504 u32 spad_trace_offsize, signature;
4506 /* Read trace section offsize structure from MCP scratchpad */
4507 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4509 /* Extract trace section address from offsize (in scratchpad) */
4510 *trace_data_grc_addr =
4511 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4513 /* Read signature from MCP trace section */
4514 signature = qed_rd(p_hwfn, p_ptt,
4515 *trace_data_grc_addr +
4516 offsetof(struct mcp_trace, signature));
4518 if (signature != MFW_TRACE_SIGNATURE)
4519 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4521 /* Read trace size from MCP trace section */
4522 *trace_data_size = qed_rd(p_hwfn,
4524 *trace_data_grc_addr +
4525 offsetof(struct mcp_trace, size));
4527 return DBG_STATUS_OK;
4530 /* Reads MCP trace meta data image from NVRAM
4531 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4532 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4533 * loaded from file).
4534 * - trace_meta_size (OUT): size in bytes of the trace meta data.
4536 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4537 struct qed_ptt *p_ptt,
4538 u32 trace_data_size_bytes,
4539 u32 *running_bundle_id,
4540 u32 *trace_meta_offset,
4541 u32 *trace_meta_size)
4543 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4545 /* Read MCP trace section offsize structure from MCP scratchpad */
4546 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4548 /* Find running bundle ID */
4550 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4551 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4552 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4553 if (*running_bundle_id > 1)
4554 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4556 /* Find image in NVRAM */
4558 (*running_bundle_id ==
4559 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4560 return qed_find_nvram_image(p_hwfn,
4563 trace_meta_offset, trace_meta_size);
4566 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4567 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4568 struct qed_ptt *p_ptt,
4569 u32 nvram_offset_in_bytes,
4570 u32 size_in_bytes, u32 *buf)
4572 u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4573 enum dbg_status status;
4576 /* Read meta data from NVRAM */
4577 status = qed_nvram_read(p_hwfn,
4579 nvram_offset_in_bytes, size_in_bytes, buf);
4580 if (status != DBG_STATUS_OK)
4583 /* Extract and check first signature */
4584 signature = qed_read_unaligned_dword(byte_buf);
4585 byte_buf += sizeof(signature);
4586 if (signature != NVM_MAGIC_VALUE)
4587 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4589 /* Extract number of modules */
4590 modules_num = *(byte_buf++);
4592 /* Skip all modules */
4593 for (i = 0; i < modules_num; i++) {
4594 module_len = *(byte_buf++);
4595 byte_buf += module_len;
4598 /* Extract and check second signature */
4599 signature = qed_read_unaligned_dword(byte_buf);
4600 byte_buf += sizeof(signature);
4601 if (signature != NVM_MAGIC_VALUE)
4602 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4604 return DBG_STATUS_OK;
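/* Meta image layout implied by the parsing above:
 *   u32 signature                  (must equal NVM_MAGIC_VALUE)
 *   u8  modules_num
 *   modules_num repetitions of:    u8 module_len, then module_len bytes
 *   u32 signature                  (must equal NVM_MAGIC_VALUE again)
 * Whatever follows the second signature is left untouched here.
 */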
4607 /* Dump MCP Trace */
4608 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4609 struct qed_ptt *p_ptt,
4611 bool dump, u32 *num_dumped_dwords)
4613 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4614 u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4615 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4616 enum dbg_status status;
4620 *num_dumped_dwords = 0;
4622 mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4624 /* Get trace data info */
4625 status = qed_mcp_trace_get_data_info(p_hwfn,
4627 &trace_data_grc_addr,
4628 &trace_data_size_bytes);
4629 if (status != DBG_STATUS_OK)
4632 /* Dump global params */
4633 offset += qed_dump_common_global_params(p_hwfn,
4635 dump_buf + offset, dump, 1);
4636 offset += qed_dump_str_param(dump_buf + offset,
4637 dump, "dump-type", "mcp-trace");
4639 /* Halt MCP while reading from scratchpad so the read data will be
4640 * consistent. If the halt fails, the MCP trace is taken anyway, with a small
4641 * risk that it may be corrupt.
4643 if (dump && mcp_access) {
4644 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4646 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4649 /* Find trace data size */
4650 trace_data_size_dwords =
4651 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4654 /* Dump trace data section header and param */
4655 offset += qed_dump_section_hdr(dump_buf + offset,
4656 dump, "mcp_trace_data", 1);
4657 offset += qed_dump_num_param(dump_buf + offset,
4658 dump, "size", trace_data_size_dwords);
4660 /* Read trace data from scratchpad into dump buffer */
4661 offset += qed_grc_dump_addr_range(p_hwfn,
4665 BYTES_TO_DWORDS(trace_data_grc_addr),
4666 trace_data_size_dwords, false);
4668 /* Resume MCP (only if halt succeeded) */
4669 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4670 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4672 /* Dump trace meta section header */
4673 offset += qed_dump_section_hdr(dump_buf + offset,
4674 dump, "mcp_trace_meta", 1);
4676 /* Read trace meta info (trace_meta_size_bytes is dword-aligned) */
4678 status = qed_mcp_trace_get_meta_info(p_hwfn,
4680 trace_data_size_bytes,
4682 &trace_meta_offset_bytes,
4683 &trace_meta_size_bytes);
4684 if (status == DBG_STATUS_OK)
4685 trace_meta_size_dwords =
4686 BYTES_TO_DWORDS(trace_meta_size_bytes);
4689 /* Dump trace meta size param */
4690 offset += qed_dump_num_param(dump_buf + offset,
4691 dump, "size", trace_meta_size_dwords);
4693 /* Read trace meta image into dump buffer */
4694 if (dump && trace_meta_size_dwords)
4695 status = qed_mcp_trace_read_meta(p_hwfn,
4697 trace_meta_offset_bytes,
4698 trace_meta_size_bytes,
4700 if (status == DBG_STATUS_OK)
4701 offset += trace_meta_size_dwords;
4703 /* Dump last section */
4704 offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4706 *num_dumped_dwords = offset;
4708 /* If there is no MCP access, indicate that the dump doesn't contain the meta data. */
4711 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4715 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4716 struct qed_ptt *p_ptt,
4718 bool dump, u32 *num_dumped_dwords)
4720 u32 dwords_read, size_param_offset, offset = 0;
4723 *num_dumped_dwords = 0;
4725 /* Dump global params */
4726 offset += qed_dump_common_global_params(p_hwfn,
4728 dump_buf + offset, dump, 1);
4729 offset += qed_dump_str_param(dump_buf + offset,
4730 dump, "dump-type", "reg-fifo");
4732 /* Dump fifo data section header and param. The size param is 0 for
4733 * now, and is overwritten after reading the FIFO.
4735 offset += qed_dump_section_hdr(dump_buf + offset,
4736 dump, "reg_fifo_data", 1);
4737 size_param_offset = offset;
4738 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4741 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4742 * test how much data is available, except for reading it.
4744 offset += REG_FIFO_DEPTH_DWORDS;
4748 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4749 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4751 /* Pull available data from fifo. Use DMAE since this is widebus memory
4752 * and must be accessed atomically. Test for dwords_read not passing
4753 * buffer size since more entries could be added to the buffer as we are
4756 for (dwords_read = 0;
4757 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4758 dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
4759 REG_FIFO_ELEMENT_DWORDS) {
4760 if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
4761 (u64)(uintptr_t)(&dump_buf[offset]),
4762 REG_FIFO_ELEMENT_DWORDS, 0))
4763 return DBG_STATUS_DMAE_FAILED;
4764 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4765 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4768 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4771 /* Dump last section */
4772 offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4774 *num_dumped_dwords = offset;
4776 return DBG_STATUS_OK;
4780 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4781 struct qed_ptt *p_ptt,
4783 bool dump, u32 *num_dumped_dwords)
4785 u32 dwords_read, size_param_offset, offset = 0;
4788 *num_dumped_dwords = 0;
4790 /* Dump global params */
4791 offset += qed_dump_common_global_params(p_hwfn,
4793 dump_buf + offset, dump, 1);
4794 offset += qed_dump_str_param(dump_buf + offset,
4795 dump, "dump-type", "igu-fifo");
4797 /* Dump fifo data section header and param. The size param is 0 for
4798 * now, and is overwritten after reading the FIFO.
4800 offset += qed_dump_section_hdr(dump_buf + offset,
4801 dump, "igu_fifo_data", 1);
4802 size_param_offset = offset;
4803 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4806 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4807 * test how much data is available, except for reading it.
4809 offset += IGU_FIFO_DEPTH_DWORDS;
4813 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4814 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4816 /* Pull available data from fifo. Use DMAE since this is widebus memory
4817 * and must be accessed atomically. Test for dwords_read not passing
4818 * buffer size since more entries could be added to the buffer as we are
4821 for (dwords_read = 0;
4822 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4823 dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
4824 IGU_FIFO_ELEMENT_DWORDS) {
4825 if (qed_dmae_grc2host(p_hwfn, p_ptt,
4826 IGU_REG_ERROR_HANDLING_MEMORY,
4827 (u64)(uintptr_t)(&dump_buf[offset]),
4828 IGU_FIFO_ELEMENT_DWORDS, 0))
4829 return DBG_STATUS_DMAE_FAILED;
4830 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4831 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4834 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4837 /* Dump last section */
4838 offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4840 *num_dumped_dwords = offset;
4842 return DBG_STATUS_OK;
4845 /* Protection Override dump */
4846 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4847 struct qed_ptt *p_ptt,
4850 u32 *num_dumped_dwords)
4852 u32 size_param_offset, override_window_dwords, offset = 0;
4854 *num_dumped_dwords = 0;
4856 /* Dump global params */
4857 offset += qed_dump_common_global_params(p_hwfn,
4859 dump_buf + offset, dump, 1);
4860 offset += qed_dump_str_param(dump_buf + offset,
4861 dump, "dump-type", "protection-override");
4863 /* Dump data section header and param. The size param is 0 for now,
4864 * and is overwritten after reading the data.
4866 offset += qed_dump_section_hdr(dump_buf + offset,
4867 dump, "protection_override_data", 1);
4868 size_param_offset = offset;
4869 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4872 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4876 /* Add override window info to buffer */
4877 override_window_dwords =
4878 qed_rd(p_hwfn, p_ptt,
4879 GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4880 PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4881 if (qed_dmae_grc2host(p_hwfn, p_ptt,
4882 GRC_REG_PROTECTION_OVERRIDE_WINDOW,
4883 (u64)(uintptr_t)(dump_buf + offset),
4884 override_window_dwords, 0))
4885 return DBG_STATUS_DMAE_FAILED;
4886 offset += override_window_dwords;
4887 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4888 override_window_dwords);
4890 /* Dump last section */
4891 offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4893 *num_dumped_dwords = offset;
4895 return DBG_STATUS_OK;
4898 /* Performs FW Asserts Dump to the specified buffer.
4899 * Returns the dumped size in dwords.
4901 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4902 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4904 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4905 struct fw_asserts_ram_section *asserts;
4906 char storm_letter_str[2] = "?";
4907 struct fw_info fw_info;
4911 /* Dump global params */
4912 offset += qed_dump_common_global_params(p_hwfn,
4914 dump_buf + offset, dump, 1);
4915 offset += qed_dump_str_param(dump_buf + offset,
4916 dump, "dump-type", "fw-asserts");
4918 /* Find Storm dump size */
4919 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4920 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4921 struct storm_defs *storm = &s_storm_defs[storm_id];
4922 u32 last_list_idx, addr;
4924 if (dev_data->block_in_reset[storm->block_id])
4927 /* Read FW info for the current Storm */
4928 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4930 asserts = &fw_info.fw_asserts_section;
4932 /* Dump FW Asserts section header and params */
4933 storm_letter_str[0] = storm->letter;
4934 offset += qed_dump_section_hdr(dump_buf + offset,
4935 dump, "fw_asserts", 2);
4936 offset += qed_dump_str_param(dump_buf + offset,
4937 dump, "storm", storm_letter_str);
4938 offset += qed_dump_num_param(dump_buf + offset,
4941 asserts->list_element_dword_size);
4943 /* Read and dump FW Asserts data */
4945 offset += asserts->list_element_dword_size;
4949 fw_asserts_section_addr = storm->sem_fast_mem_addr +
4950 SEM_FAST_REG_INT_RAM +
4951 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4952 next_list_idx_addr = fw_asserts_section_addr +
4953 DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4954 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
4955 last_list_idx = (next_list_idx > 0
4957 : asserts->list_num_elements) - 1;
4958 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4959 asserts->list_dword_offset +
4960 last_list_idx * asserts->list_element_dword_size;
4962 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4965 asserts->list_element_dword_size,
4969 /* Dump last section */
4970 offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4975 /***************************** Public Functions *******************************/
4977 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
4979 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
4982 /* convert binary data to debug arrays */
4983 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4984 s_dbg_arrays[buf_id].ptr =
4985 (u32 *)(bin_ptr + buf_array[buf_id].offset);
4986 s_dbg_arrays[buf_id].size_in_dwords =
4987 BYTES_TO_DWORDS(buf_array[buf_id].length);
4990 return DBG_STATUS_OK;
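/* Illustrative usage sketch (not part of the driver): the debug arrays must be
 * pointed at the firmware binary debug data before any of the dump functions
 * below are used, otherwise they return DBG_STATUS_DBG_ARRAY_NOT_SET.
 * "dbg_data_blob" is a hypothetical pointer to that binary data:
 *
 *	enum dbg_status rc = qed_dbg_set_bin_ptr(dbg_data_blob);
 *
 *	if (rc != DBG_STATUS_OK)
 *		return rc;
 */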
4993 /* Assign default GRC param values */
4994 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4996 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4999 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5000 dev_data->grc.param_val[i] =
5001 s_grc_param_defs[i].default_val[dev_data->chip_id];
5004 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5005 struct qed_ptt *p_ptt,
5008 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5012 if (status != DBG_STATUS_OK)
5015 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5016 !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5017 !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5018 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5019 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5020 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5022 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5025 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5026 struct qed_ptt *p_ptt,
5028 u32 buf_size_in_dwords,
5029 u32 *num_dumped_dwords)
5031 u32 needed_buf_size_in_dwords;
5032 enum dbg_status status;
5034 *num_dumped_dwords = 0;
5036 status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5038 &needed_buf_size_in_dwords);
5039 if (status != DBG_STATUS_OK)
5042 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5043 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5046 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5048 /* Revert GRC params to their default */
5049 qed_dbg_grc_set_params_default(p_hwfn);
5054 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5055 struct qed_ptt *p_ptt,
5058 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5059 struct idle_chk_data *idle_chk;
5060 enum dbg_status status;
5062 idle_chk = &dev_data->idle_chk;
5065 status = qed_dbg_dev_init(p_hwfn, p_ptt);
5066 if (status != DBG_STATUS_OK)
5069 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5070 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5071 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5072 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5073 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5075 if (!idle_chk->buf_size_set) {
5076 idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5077 p_ptt, NULL, false);
5078 idle_chk->buf_size_set = true;
5081 *buf_size = idle_chk->buf_size;
5083 return DBG_STATUS_OK;
5086 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5087 struct qed_ptt *p_ptt,
5089 u32 buf_size_in_dwords,
5090 u32 *num_dumped_dwords)
5092 u32 needed_buf_size_in_dwords;
5093 enum dbg_status status;
5095 *num_dumped_dwords = 0;
5097 status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5099 &needed_buf_size_in_dwords);
5100 if (status != DBG_STATUS_OK)
5103 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5104 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5106 /* Update reset state */
5107 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5109 /* Idle Check Dump */
5110 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5112 /* Revert GRC params to their default */
5113 qed_dbg_grc_set_params_default(p_hwfn);
5115 return DBG_STATUS_OK;
5118 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5119 struct qed_ptt *p_ptt,
5122 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5126 if (status != DBG_STATUS_OK)
5129 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5132 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5133 struct qed_ptt *p_ptt,
5135 u32 buf_size_in_dwords,
5136 u32 *num_dumped_dwords)
5138 u32 needed_buf_size_in_dwords;
5139 enum dbg_status status;
5142 qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5144 &needed_buf_size_in_dwords);
5145 if (status != DBG_STATUS_OK && status !=
5146 DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5149 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5150 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5152 /* Update reset state */
5153 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5156 status = qed_mcp_trace_dump(p_hwfn,
5157 p_ptt, dump_buf, true, num_dumped_dwords);
5159 /* Revert GRC params to their default */
5160 qed_dbg_grc_set_params_default(p_hwfn);
5165 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5166 struct qed_ptt *p_ptt,
5169 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5173 if (status != DBG_STATUS_OK)
5176 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5179 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5180 struct qed_ptt *p_ptt,
5182 u32 buf_size_in_dwords,
5183 u32 *num_dumped_dwords)
5185 u32 needed_buf_size_in_dwords;
5186 enum dbg_status status;
5188 *num_dumped_dwords = 0;
5190 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5192 &needed_buf_size_in_dwords);
5193 if (status != DBG_STATUS_OK)
5196 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5197 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5199 /* Update reset state */
5200 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5202 status = qed_reg_fifo_dump(p_hwfn,
5203 p_ptt, dump_buf, true, num_dumped_dwords);
5205 /* Revert GRC params to their default */
5206 qed_dbg_grc_set_params_default(p_hwfn);
5211 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5212 struct qed_ptt *p_ptt,
5215 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5219 if (status != DBG_STATUS_OK)
5222 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5225 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5226 struct qed_ptt *p_ptt,
5228 u32 buf_size_in_dwords,
5229 u32 *num_dumped_dwords)
5231 u32 needed_buf_size_in_dwords;
5232 enum dbg_status status;
5234 *num_dumped_dwords = 0;
5236 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5238 &needed_buf_size_in_dwords);
5239 if (status != DBG_STATUS_OK)
5242 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5243 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5245 /* Update reset state */
5246 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5248 status = qed_igu_fifo_dump(p_hwfn,
5249 p_ptt, dump_buf, true, num_dumped_dwords);
5250 /* Revert GRC params to their default */
5251 qed_dbg_grc_set_params_default(p_hwfn);
5257 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5258 struct qed_ptt *p_ptt,
5261 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5265 if (status != DBG_STATUS_OK)
5268 return qed_protection_override_dump(p_hwfn,
5269 p_ptt, NULL, false, buf_size);
5272 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5273 struct qed_ptt *p_ptt,
5275 u32 buf_size_in_dwords,
5276 u32 *num_dumped_dwords)
5278 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5279 enum dbg_status status;
5281 *num_dumped_dwords = 0;
5284 qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5287 if (status != DBG_STATUS_OK)
5290 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5291 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5293 /* Update reset state */
5294 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5296 status = qed_protection_override_dump(p_hwfn,
5299 true, num_dumped_dwords);
5301 /* Revert GRC params to their default */
5302 qed_dbg_grc_set_params_default(p_hwfn);
5307 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5308 struct qed_ptt *p_ptt,
5311 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5315 if (status != DBG_STATUS_OK)
5318 /* Update reset state */
5319 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5321 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5323 return DBG_STATUS_OK;
5326 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5327 struct qed_ptt *p_ptt,
5329 u32 buf_size_in_dwords,
5330 u32 *num_dumped_dwords)
5332 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5333 enum dbg_status status;
5335 *num_dumped_dwords = 0;
5338 qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5341 if (status != DBG_STATUS_OK)
5344 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5345 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5347 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5349 /* Revert GRC params to their default */
5350 qed_dbg_grc_set_params_default(p_hwfn);
5352 return DBG_STATUS_OK;
5355 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5356 struct qed_ptt *p_ptt,
5357 enum block_id block_id,
5358 enum dbg_attn_type attn_type,
5360 struct dbg_attn_block_result *results)
5362 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5363 u8 reg_idx, num_attn_regs, num_result_regs = 0;
5364 const struct dbg_attn_reg *attn_reg_arr;
5366 if (status != DBG_STATUS_OK)
5369 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5370 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5371 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5372 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5374 attn_reg_arr = qed_get_block_attn_regs(block_id,
5375 attn_type, &num_attn_regs);
5377 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5378 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5379 struct dbg_attn_reg_result *reg_result;
5380 u32 sts_addr, sts_val;
5381 u16 modes_buf_offset;
5385 eval_mode = GET_FIELD(reg_data->mode.data,
5386 DBG_MODE_HDR_EVAL_MODE) > 0;
5387 modes_buf_offset = GET_FIELD(reg_data->mode.data,
5388 DBG_MODE_HDR_MODES_BUF_OFFSET);
5389 if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5392 /* Mode match - read attention status register */
5393 sts_addr = DWORDS_TO_BYTES(clear_status ?
5394 reg_data->sts_clr_address :
5395 GET_FIELD(reg_data->data,
5396 DBG_ATTN_REG_STS_ADDRESS));
5397 sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5401 /* Non-zero attention status - add to results */
5402 reg_result = &results->reg_results[num_result_regs];
5403 SET_FIELD(reg_result->data,
5404 DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5405 SET_FIELD(reg_result->data,
5406 DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5407 GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5408 reg_result->block_attn_offset = reg_data->block_attn_offset;
5409 reg_result->sts_val = sts_val;
5410 reg_result->mask_val = qed_rd(p_hwfn,
5413 (reg_data->mask_address));
5417 results->block_id = (u8)block_id;
5418 results->names_offset =
5419 qed_get_block_attn_data(block_id, attn_type)->names_offset;
5420 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5421 SET_FIELD(results->data,
5422 DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5424 return DBG_STATUS_OK;
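/* Illustrative usage sketch (not part of the driver): the dump APIs above are
 * meant to be called in two steps - query the required size, then dump into a
 * buffer of at least that many dwords. The elided third parameter of
 * qed_dbg_grc_dump() is assumed here to be the dword dump buffer:
 *
 *	u32 size_dwords, dumped_dwords;
 *	enum dbg_status rc;
 *	u32 *buf;
 *
 *	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords);
 *	if (rc != DBG_STATUS_OK)
 *		return rc;
 *	buf = vzalloc(size_dwords * sizeof(u32));
 *	if (!buf)
 *		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
 *	rc = qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords,
 *			      &dumped_dwords);
 */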
5427 /******************************* Data Types **********************************/
5434 struct mcp_trace_format {
5436 #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
5437 #define MCP_TRACE_FORMAT_MODULE_SHIFT 0
5438 #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
5439 #define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
5440 #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
5441 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
5442 #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
5443 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
5444 #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
5445 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
5446 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
5447 #define MCP_TRACE_FORMAT_LEN_SHIFT 24
5452 /* Meta data structure, generated by a Perl script during the MFW build. Therefore,
5453 * the structs mcp_trace_meta and mcp_trace_format are duplicated in the Perl
5456 struct mcp_trace_meta {
5460 struct mcp_trace_format *formats;
5463 /* REG fifo element */
5464 struct reg_fifo_element {
5466 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
5467 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
5468 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
5469 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
5470 #define REG_FIFO_ELEMENT_PF_SHIFT 24
5471 #define REG_FIFO_ELEMENT_PF_MASK 0xf
5472 #define REG_FIFO_ELEMENT_VF_SHIFT 28
5473 #define REG_FIFO_ELEMENT_VF_MASK 0xff
5474 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
5475 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
5476 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
5477 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
5478 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
5479 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
5480 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
5481 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
5482 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
5483 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
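/* Illustrative sketch (not part of the driver): decoding a single REG FIFO
 * element with the masks/shifts above, as done by the parser further below.
 * The address field is stored in units of 4 bytes, and a VF field equal to
 * REG_FIFO_ELEMENT_IS_PF_VF_VAL (127) means no VF is associated with the
 * access (the parser prints it as "N/A"):
 *
 *	u32 addr = GET_FIELD(elem->data, REG_FIFO_ELEMENT_ADDRESS) *
 *		   REG_FIFO_ELEMENT_ADDR_FACTOR;
 *	u8 vf = GET_FIELD(elem->data, REG_FIFO_ELEMENT_VF);
 *	bool no_vf = (vf == REG_FIFO_ELEMENT_IS_PF_VF_VAL);
 */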
5486 /* IGU fifo element */
5487 struct igu_fifo_element {
5489 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
5490 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
5491 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
5492 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
5493 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
5494 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
5495 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
5496 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
5497 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
5498 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
5501 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
5502 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
5503 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
5504 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
5508 struct igu_fifo_wr_data {
5510 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
5511 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
5512 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
5513 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
5514 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
5515 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
5516 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
5517 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
5518 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
5519 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
5520 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
5521 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
5524 struct igu_fifo_cleanup_wr_data {
5526 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
5527 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
5528 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
5529 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
5530 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
5531 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
5532 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
5533 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
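/* Illustrative sketch (not part of the driver): the IGU FIFO parser below
 * combines dwords 1 and 2 of an element into a 64-bit value and then extracts
 * the write data and its producer/consumer field from it:
 *
 *	u64 dword12 = ((u64)elem->dword2 << 32) | elem->dword1;
 *	u32 wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
 *	u32 prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
 */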
5536 /* Protection override element */
5537 struct protection_override_element {
5539 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
5540 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
5541 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
5542 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
5543 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
5544 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
5545 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
5546 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
5547 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
5548 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
5549 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
5550 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
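/* Illustrative sketch (not part of the driver, and assuming the element's
 * 64-bit field is named "data" like in struct reg_fifo_element): a protection
 * override element is decoded with the masks/shifts above; like the REG FIFO,
 * its address is stored in units of 4 bytes (see
 * PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR below):
 *
 *	u32 addr = GET_FIELD(elem->data, PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
 *		   PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
 *	u32 size = GET_FIELD(elem->data,
 *			     PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE);
 */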
5553 enum igu_fifo_sources {
5567 enum igu_fifo_addr_types {
5568 IGU_ADDR_TYPE_MSIX_MEM,
5569 IGU_ADDR_TYPE_WRITE_PBA,
5570 IGU_ADDR_TYPE_WRITE_INT_ACK,
5571 IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5572 IGU_ADDR_TYPE_READ_INT,
5573 IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5574 IGU_ADDR_TYPE_RESERVED
5577 struct igu_fifo_addr_data {
5582 enum igu_fifo_addr_types type;
5585 /******************************** Constants **********************************/
5587 #define MAX_MSG_LEN 1024
5589 #define MCP_TRACE_MAX_MODULE_LEN 8
5590 #define MCP_TRACE_FORMAT_MAX_PARAMS 3
5591 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5592 (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
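/* Worked note (not part of the driver): MCP_TRACE_FORMAT_PARAM_WIDTH evaluates
 * to 2 (= 20 - 18), i.e. each of the up to 3 parameter-size fields in a format
 * dword is 2 bits wide, so the parser below walks P1 -> P2 -> P3 by shifting
 * the P1 mask/shift left by this width on every iteration. A size of 0 means
 * no (further) parameters, and a size of 3 encodes a 4-byte parameter.
 */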
5594 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4
5595 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
5597 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
5599 /********************************* Macros ************************************/
5601 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
5603 /***************************** Constant Arrays *******************************/
5605 struct user_dbg_array {
5611 static struct user_dbg_array
5612 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5614 /* Block names array */
5615 static struct block_info s_block_info_arr[] = {
5617 {"miscs", BLOCK_MISCS},
5618 {"misc", BLOCK_MISC},
5620 {"pglue_b", BLOCK_PGLUE_B},
5621 {"cnig", BLOCK_CNIG},
5622 {"cpmu", BLOCK_CPMU},
5623 {"ncsi", BLOCK_NCSI},
5624 {"opte", BLOCK_OPTE},
5626 {"pcie", BLOCK_PCIE},
5628 {"mcp2", BLOCK_MCP2},
5629 {"pswhst", BLOCK_PSWHST},
5630 {"pswhst2", BLOCK_PSWHST2},
5631 {"pswrd", BLOCK_PSWRD},
5632 {"pswrd2", BLOCK_PSWRD2},
5633 {"pswwr", BLOCK_PSWWR},
5634 {"pswwr2", BLOCK_PSWWR2},
5635 {"pswrq", BLOCK_PSWRQ},
5636 {"pswrq2", BLOCK_PSWRQ2},
5637 {"pglcs", BLOCK_PGLCS},
5639 {"dmae", BLOCK_DMAE},
5648 {"dorq", BLOCK_DORQ},
5652 {"tsdm", BLOCK_TSDM},
5653 {"msdm", BLOCK_MSDM},
5654 {"usdm", BLOCK_USDM},
5655 {"xsdm", BLOCK_XSDM},
5656 {"ysdm", BLOCK_YSDM},
5657 {"psdm", BLOCK_PSDM},
5658 {"tsem", BLOCK_TSEM},
5659 {"msem", BLOCK_MSEM},
5660 {"usem", BLOCK_USEM},
5661 {"xsem", BLOCK_XSEM},
5662 {"ysem", BLOCK_YSEM},
5663 {"psem", BLOCK_PSEM},
5665 {"tmld", BLOCK_TMLD},
5666 {"muld", BLOCK_MULD},
5667 {"yuld", BLOCK_YULD},
5668 {"xyld", BLOCK_XYLD},
5669 {"ptld", BLOCK_PTLD},
5670 {"ypld", BLOCK_YPLD},
5672 {"pbf_pb1", BLOCK_PBF_PB1},
5673 {"pbf_pb2", BLOCK_PBF_PB2},
5677 {"rdif", BLOCK_RDIF},
5678 {"tdif", BLOCK_TDIF},
5680 {"ccfc", BLOCK_CCFC},
5681 {"tcfc", BLOCK_TCFC},
5684 {"rgfs", BLOCK_RGFS},
5685 {"rgsrc", BLOCK_RGSRC},
5686 {"tgfs", BLOCK_TGFS},
5687 {"tgsrc", BLOCK_TGSRC},
5688 {"umac", BLOCK_UMAC},
5689 {"xmac", BLOCK_XMAC},
5693 {"bmbn", BLOCK_BMBN},
5698 {"phy_pcie", BLOCK_PHY_PCIE},
5700 {"avs_wrap", BLOCK_AVS_WRAP},
5701 {"misc_aeu", BLOCK_MISC_AEU},
5702 {"bar0_map", BLOCK_BAR0_MAP}
5705 /* Status string array */
5706 static const char * const s_status_str[] = {
5708 "Operation completed successfully",
5710 /* DBG_STATUS_APP_VERSION_NOT_SET */
5711 "Debug application version wasn't set",
5713 /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5714 "Unsupported debug application version",
5716 /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5717 "The debug block wasn't reset since the last recording",
5719 /* DBG_STATUS_INVALID_ARGS */
5720 "Invalid arguments",
5722 /* DBG_STATUS_OUTPUT_ALREADY_SET */
5723 "The debug output was already set",
5725 /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5726 "Invalid PCI buffer size",
5728 /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5729 "PCI buffer allocation failed",
5731 /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5732 "A PCI buffer wasn't allocated",
5734 /* DBG_STATUS_TOO_MANY_INPUTS */
5735 "Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
5737 /* DBG_STATUS_INPUT_OVERLAP */
5738 "Overlapping debug bus inputs",
5740 /* DBG_STATUS_HW_ONLY_RECORDING */
5741 "Cannot record Storm data since the entire recording cycle is used by HW",
5743 /* DBG_STATUS_STORM_ALREADY_ENABLED */
5744 "The Storm was already enabled",
5746 /* DBG_STATUS_STORM_NOT_ENABLED */
5747 "The specified Storm wasn't enabled",
5749 /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5750 "The block was already enabled",
5752 /* DBG_STATUS_BLOCK_NOT_ENABLED */
5753 "The specified block wasn't enabled",
5755 /* DBG_STATUS_NO_INPUT_ENABLED */
5756 "No input was enabled for recording",
5758 /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5759 "Filters and triggers are not allowed when recording in 64b units",
5761 /* DBG_STATUS_FILTER_ALREADY_ENABLED */
5762 "The filter was already enabled",
5764 /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5765 "The trigger was already enabled",
5767 /* DBG_STATUS_TRIGGER_NOT_ENABLED */
5768 "The trigger wasn't enabled",
5770 /* DBG_STATUS_CANT_ADD_CONSTRAINT */
5771 "A constraint can be added only after a filter was enabled or a trigger state was added",
5773 /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5774 "Cannot add more than 3 trigger states",
5776 /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5777 "Cannot add more than 4 constraints per filter or trigger state",
5779 /* DBG_STATUS_RECORDING_NOT_STARTED */
5780 "The recording wasn't started",
5782 /* DBG_STATUS_DATA_DIDNT_TRIGGER */
5783 "A trigger was configured, but it didn't trigger",
5785 /* DBG_STATUS_NO_DATA_RECORDED */
5786 "No data was recorded",
5788 /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5789 "Dump buffer is too small",
5791 /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5792 "Dumped data is not aligned to chunks",
5794 /* DBG_STATUS_UNKNOWN_CHIP */
5797 /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5798 "Failed allocating virtual memory",
5800 /* DBG_STATUS_BLOCK_IN_RESET */
5801 "The input block is in reset",
5803 /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5804 "Invalid MCP trace signature found in NVRAM",
5806 /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5807 "Invalid bundle ID found in NVRAM",
5809 /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5810 "Failed getting NVRAM image",
5812 /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5813 "NVRAM image is not dword-aligned",
5815 /* DBG_STATUS_NVRAM_READ_FAILED */
5816 "Failed reading from NVRAM",
5818 /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5819 "Idle check parsing failed",
5821 /* DBG_STATUS_MCP_TRACE_BAD_DATA */
5822 "MCP Trace data is corrupt",
5824 /* DBG_STATUS_MCP_TRACE_NO_META */
5825 "Dump doesn't contain meta data - it must be provided in image file",
5827 /* DBG_STATUS_MCP_COULD_NOT_HALT */
5828 "Failed to halt MCP",
5830 /* DBG_STATUS_MCP_COULD_NOT_RESUME */
5831 "Failed to resume MCP after halt",
5833 /* DBG_STATUS_DMAE_FAILED */
5834 "DMAE transaction failed",
5836 /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5837 "Failed to empty SEMI sync FIFO",
5839 /* DBG_STATUS_IGU_FIFO_BAD_DATA */
5840 "IGU FIFO data is corrupt",
5842 /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5843 "MCP failed to mask parities",
5845 /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5846 "FW Asserts parsing failed",
5848 /* DBG_STATUS_REG_FIFO_BAD_DATA */
5849 "GRC FIFO data is corrupt",
5851 /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5852 "Protection Override data is corrupt",
5854 /* DBG_STATUS_DBG_ARRAY_NOT_SET */
5855 "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5857 /* DBG_STATUS_FILTER_BUG */
5858 "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5860 /* DBG_STATUS_NON_MATCHING_LINES */
5861 "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5863 /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5864 "The selected trigger dword offset wasn't enabled in the recorded HW block",
5866 /* DBG_STATUS_DBG_BUS_IN_USE */
5867 "The debug bus is in use"
5870 /* Idle check severity names array */
5871 static const char * const s_idle_chk_severity_str[] = {
5873 "Error if no traffic",
5877 /* MCP Trace level names array */
5878 static const char * const s_mcp_trace_level_str[] = {
5884 /* Access type names array */
5885 static const char * const s_access_strs[] = {
5890 /* Privilege type names array */
5891 static const char * const s_privilege_strs[] = {
5898 /* Protection type names array */
5899 static const char * const s_protection_strs[] = {
5910 /* Master type names array */
5911 static const char * const s_master_strs[] = {
5930 /* REG FIFO error messages array */
5931 static const char * const s_reg_fifo_error_strs[] = {
5933 "address doesn't belong to any block",
5934 "reserved address in block or write to read-only address",
5935 "privilege/protection mismatch",
5936 "path isolation error"
5939 /* IGU FIFO sources array */
5940 static const char * const s_igu_fifo_source_strs[] = {
5954 /* IGU FIFO error messages */
5955 static const char * const s_igu_fifo_error_strs[] = {
5958 "function disabled",
5959 "VF sent command to attention address",
5960 "host sent prod update command",
5961 "read of 'during interrupt' register while in MIMD mode",
5962 "access to PXP BAR reserved address",
5963 "producer update command to attention index",
5965 "SB index not valid",
5966 "SB relative index and FID not found",
5968 "command with error flag asserted (PCI error or CAU discard)",
5969 "VF sent cleanup and RF cleanup is disabled",
5970 "cleanup command on type bigger than 4"
5973 /* IGU FIFO address data */
5974 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
5975 {0x0, 0x101, "MSI-X Memory", NULL,
5976 IGU_ADDR_TYPE_MSIX_MEM},
5977 {0x102, 0x1ff, "reserved", NULL,
5978 IGU_ADDR_TYPE_RESERVED},
5979 {0x200, 0x200, "Write PBA[0:63]", NULL,
5980 IGU_ADDR_TYPE_WRITE_PBA},
5981 {0x201, 0x201, "Write PBA[64:127]", "reserved",
5982 IGU_ADDR_TYPE_WRITE_PBA},
5983 {0x202, 0x202, "Write PBA[128]", "reserved",
5984 IGU_ADDR_TYPE_WRITE_PBA},
5985 {0x203, 0x3ff, "reserved", NULL,
5986 IGU_ADDR_TYPE_RESERVED},
5987 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
5988 IGU_ADDR_TYPE_WRITE_INT_ACK},
5989 {0x5f0, 0x5f0, "Attention bits update", NULL,
5990 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5991 {0x5f1, 0x5f1, "Attention bits set", NULL,
5992 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5993 {0x5f2, 0x5f2, "Attention bits clear", NULL,
5994 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5995 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
5996 IGU_ADDR_TYPE_READ_INT},
5997 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
5998 IGU_ADDR_TYPE_READ_INT},
5999 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6000 IGU_ADDR_TYPE_READ_INT},
6001 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6002 IGU_ADDR_TYPE_READ_INT},
6003 {0x5f7, 0x5ff, "reserved", NULL,
6004 IGU_ADDR_TYPE_RESERVED},
6005 {0x600, 0x7ff, "Producer update", NULL,
6006 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6009 /******************************** Variables **********************************/
6011 /* MCP Trace meta data - used in case the dump doesn't contain the meta data
6012 * (e.g. due to no NVRAM access).
6014 static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 };
6016 /* Temporary buffer, used for print size calculations */
6017 static char s_temp_buf[MAX_MSG_LEN];
6019 /**************************** Private Functions ******************************/
6021 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6023 return (a + b) % size;
6026 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6028 return (size + a - b) % size;
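/* Worked example (not part of the driver): with a 16-byte cyclic buffer,
 * qed_cyclic_add(14, 4, 16) == 2 (the offset wraps past the end), and
 * qed_cyclic_sub(2, 14, 16) == 4 is the number of bytes available between a
 * consumer at offset 14 and a producer at offset 2.
 */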
6031 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6032 * bytes) and returns them as a dword value. The specified buffer offset is
6035 static u32 qed_read_from_cyclic_buf(void *buf,
6037 u32 buf_size, u8 num_bytes_to_read)
6039 u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6042 val_ptr = (u8 *)&val;
6044 for (i = 0; i < num_bytes_to_read; i++) {
6045 val_ptr[i] = bytes_buf[*offset];
6046 *offset = qed_cyclic_add(*offset, 1, buf_size);
6052 /* Reads and returns the next byte from the specified buffer.
6053 * The specified buffer offset is updated.
6055 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6057 return ((u8 *)buf)[(*offset)++];
6060 /* Reads and returns the next dword from the specified buffer.
6061 * The specified buffer offset is updated.
6063 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6065 u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6072 /* Reads the next string from the specified buffer, and copies it to the
6073 * specified pointer. The specified buffer offset is updated.
6075 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6077 const char *source_str = &((const char *)buf)[*offset];
6079 strncpy(dest, source_str, size);
6080 dest[size - 1] = '\0';
6084 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6085 * If the specified buffer is NULL, a temporary buffer pointer is returned.
6087 static char *qed_get_buf_ptr(void *buf, u32 offset)
6089 return buf ? (char *)buf + offset : s_temp_buf;
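/* Usage note (not part of the driver): this is what lets the parsers below run
 * in a "size only" pass - when the results buffer is NULL, every
 *
 *	results_offset +=
 *	    sprintf(qed_get_buf_ptr(results_buf, results_offset), ...);
 *
 * writes into s_temp_buf and only advances results_offset, so the final offset
 * (plus 1 for the NULL terminator) is the required results buffer size.
 */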
6092 /* Reads a param from the specified buffer. Returns the number of dwords read.
6093 * If the returned str_param is NULL, the param is numeric and its value is
6094 * returned in num_param.
6095 * Otherwise, the param is a string and its pointer is returned in str_param.
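* Layout sketch (not part of the original comment, derived from the code
* below): a param is serialized as
*   "name\0" <type byte> ["string value\0"] <pad to dword> [u32 value]
* For example, the numeric param "size" takes 5 name bytes + 1 type byte +
* 2 padding bytes + 4 value bytes = 12 bytes, i.e. the 3 dwords that this
* function reports as read.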
6097 static u32 qed_read_param(u32 *dump_buf,
6098 const char **param_name,
6099 const char **param_str_val, u32 *param_num_val)
6101 char *char_buf = (char *)dump_buf;
6104 /* Extract param name */
6105 *param_name = char_buf;
6106 offset += strlen(*param_name) + 1;
6108 /* Check param type */
6109 if (*(char_buf + offset++)) {
6111 *param_str_val = char_buf + offset;
6112 offset += strlen(*param_str_val) + 1;
6114 offset += (4 - (offset & 0x3));
6117 *param_str_val = NULL;
6119 offset += (4 - (offset & 0x3));
6120 *param_num_val = *(u32 *)(char_buf + offset);
6127 /* Reads a section header from the specified buffer.
6128 * Returns the number of dwords read.
6130 static u32 qed_read_section_hdr(u32 *dump_buf,
6131 const char **section_name,
6132 u32 *num_section_params)
6134 const char *param_str_val;
6136 return qed_read_param(dump_buf,
6137 section_name, ¶m_str_val, num_section_params);
6140 /* Reads section params from the specified buffer and prints them to the results
6141 * buffer. Returns the number of dwords read.
6143 static u32 qed_print_section_params(u32 *dump_buf,
6144 u32 num_section_params,
6145 char *results_buf, u32 *num_chars_printed)
6147 u32 i, dump_offset = 0, results_offset = 0;
6149 for (i = 0; i < num_section_params; i++) {
6150 const char *param_name, *param_str_val;
6151 u32 param_num_val = 0;
6153 dump_offset += qed_read_param(dump_buf + dump_offset,
6155 ¶m_str_val, ¶m_num_val);
6159 sprintf(qed_get_buf_ptr(results_buf,
6161 "%s: %s\n", param_name, param_str_val);
6162 else if (strcmp(param_name, "fw-timestamp"))
6164 sprintf(qed_get_buf_ptr(results_buf,
6166 "%s: %d\n", param_name, param_num_val);
6169 results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6172 *num_chars_printed = results_offset;
6177 /* Parses the idle check rules and returns the number of characters printed.
6178 * In case of parsing error, returns 0.
6180 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
6184 bool print_fw_idle_chk,
6186 u32 *num_errors, u32 *num_warnings)
6188 /* Offset in results_buf in bytes */
6189 u32 results_offset = 0;
6197 /* Go over dumped results */
6198 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6200 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6201 struct dbg_idle_chk_result_hdr *hdr;
6202 const char *parsing_str, *lsi_msg;
6203 u32 parsing_str_offset;
6207 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6209 (const struct dbg_idle_chk_rule_parsing_data *)
6210 &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6212 parsing_str_offset =
6213 GET_FIELD(rule_parsing_data->data,
6214 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6216 GET_FIELD(rule_parsing_data->data,
6217 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6220 s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6221 [parsing_str_offset];
6222 lsi_msg = parsing_str;
6225 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6228 /* Skip rule header */
6229 dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6231 /* Update errors/warnings count */
6232 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6233 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6238 /* Print rule severity */
6240 sprintf(qed_get_buf_ptr(results_buf,
6241 results_offset), "%s: ",
6242 s_idle_chk_severity_str[hdr->severity]);
6244 /* Print rule message */
6246 parsing_str += strlen(parsing_str) + 1;
6248 sprintf(qed_get_buf_ptr(results_buf,
6249 results_offset), "%s.",
6251 print_fw_idle_chk ? parsing_str : lsi_msg);
6252 parsing_str += strlen(parsing_str) + 1;
6254 /* Print register values */
6256 sprintf(qed_get_buf_ptr(results_buf,
6257 results_offset), " Registers:");
6259 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6261 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6266 (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6267 is_mem = GET_FIELD(reg_hdr->data,
6268 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6269 reg_id = GET_FIELD(reg_hdr->data,
6270 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6272 /* Skip reg header */
6273 dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6275 /* Skip register names until the required reg_id is
6278 for (; reg_id > curr_reg_id;
6280 parsing_str += strlen(parsing_str) + 1);
6283 sprintf(qed_get_buf_ptr(results_buf,
6284 results_offset), " %s",
6286 if (i < hdr->num_dumped_cond_regs && is_mem)
6288 sprintf(qed_get_buf_ptr(results_buf,
6290 "[%d]", hdr->mem_entry_id +
6291 reg_hdr->start_entry);
6293 sprintf(qed_get_buf_ptr(results_buf,
6294 results_offset), "=");
6295 for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6297 sprintf(qed_get_buf_ptr(results_buf,
6300 if (j < reg_hdr->size - 1)
6302 sprintf(qed_get_buf_ptr
6304 results_offset), ",");
6309 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6312 /* Check if end of dump buffer was exceeded */
6313 if (dump_buf > dump_buf_end)
6316 return results_offset;
6319 /* Parses an idle check dump buffer.
6320 * If result_buf is not NULL, the idle check results are printed to it.
6321 * In any case, the required results buffer size is assigned to
6322 * parsed_results_bytes.
6323 * The parsing status is returned.
6325 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
6327 u32 num_dumped_dwords,
6329 u32 *parsed_results_bytes,
6333 const char *section_name, *param_name, *param_str_val;
6334 u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6335 u32 num_section_params = 0, num_rules;
6337 /* Offset in results_buf in bytes */
6338 u32 results_offset = 0;
6340 *parsed_results_bytes = 0;
6344 if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6345 !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6346 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6348 /* Read global_params section */
6349 dump_buf += qed_read_section_hdr(dump_buf,
6350 §ion_name, &num_section_params);
6351 if (strcmp(section_name, "global_params"))
6352 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6354 /* Print global params */
6355 dump_buf += qed_print_section_params(dump_buf,
6357 results_buf, &results_offset);
6359 /* Read idle_chk section */
6360 dump_buf += qed_read_section_hdr(dump_buf,
6361 §ion_name, &num_section_params);
6362 if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6363 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6364 dump_buf += qed_read_param(dump_buf,
6365 ¶m_name, ¶m_str_val, &num_rules);
6366 if (strcmp(param_name, "num_rules"))
6367 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6370 u32 rules_print_size;
6372 /* Print FW output */
6374 sprintf(qed_get_buf_ptr(results_buf,
6376 "FW_IDLE_CHECK:\n");
6378 qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
6379 dump_buf_end, num_rules,
6383 results_offset : NULL,
6384 num_errors, num_warnings);
6385 results_offset += rules_print_size;
6386 if (!rules_print_size)
6387 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6389 /* Print LSI output */
6391 sprintf(qed_get_buf_ptr(results_buf,
6393 "\nLSI_IDLE_CHECK:\n");
6395 qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
6396 dump_buf_end, num_rules,
6400 results_offset : NULL,
6401 num_errors, num_warnings);
6402 results_offset += rules_print_size;
6403 if (!rules_print_size)
6404 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6407 /* Print errors/warnings count */
6410 sprintf(qed_get_buf_ptr(results_buf,
6412 "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6413 *num_errors, *num_warnings);
6414 else if (*num_warnings)
6416 sprintf(qed_get_buf_ptr(results_buf,
6418 "\nIdle Check completed successfully (with %d warnings)\n",
6422 sprintf(qed_get_buf_ptr(results_buf,
6424 "\nIdle Check completed successfully\n");
6426 /* Add 1 for string NULL termination */
6427 *parsed_results_bytes = results_offset + 1;
6429 return DBG_STATUS_OK;
6432 /* Frees the specified MCP Trace meta data */
6433 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
6434 struct mcp_trace_meta *meta)
6438 /* Release modules */
6439 if (meta->modules) {
6440 for (i = 0; i < meta->modules_num; i++)
6441 kfree(meta->modules[i]);
6442 kfree(meta->modules);
6445 /* Release formats */
6446 if (meta->formats) {
6447 for (i = 0; i < meta->formats_num; i++)
6448 kfree(meta->formats[i].format_str);
6449 kfree(meta->formats);
6453 /* Allocates and fills MCP Trace meta data based on the specified meta data
6455 * Returns debug status code.
6457 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
6458 const u32 *meta_buf,
6459 struct mcp_trace_meta *meta)
6461 u8 *meta_buf_bytes = (u8 *)meta_buf;
6462 u32 offset = 0, signature, i;
6464 memset(meta, 0, sizeof(*meta));
6466 /* Read first signature */
6467 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6468 if (signature != NVM_MAGIC_VALUE)
6469 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6471 /* Read no. of modules and allocate memory for their pointers */
6472 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6473 meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
6475 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6477 /* Allocate and read all module strings */
6478 for (i = 0; i < meta->modules_num; i++) {
6479 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6481 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6482 if (!(*(meta->modules + i))) {
6483 /* Update number of modules to be released */
6484 meta->modules_num = i;
6485 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6488 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6489 *(meta->modules + i));
6490 if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6491 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6494 /* Read second signature */
6495 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6496 if (signature != NVM_MAGIC_VALUE)
6497 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6499 /* Read number of formats and allocate memory for all formats */
6500 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6501 meta->formats = kzalloc(meta->formats_num *
6502 sizeof(struct mcp_trace_format),
6505 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6507 /* Allocate and read all strings */
6508 for (i = 0; i < meta->formats_num; i++) {
6509 struct mcp_trace_format *format_ptr = &meta->formats[i];
6512 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6516 MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6517 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6518 if (!format_ptr->format_str) {
6519 /* Update number of formats to be released */
6520 meta->formats_num = i;
6521 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6524 qed_read_str_from_buf(meta_buf_bytes,
6526 format_len, format_ptr->format_str);
6529 return DBG_STATUS_OK;
6532 /* Parses an MCP Trace dump buffer.
6533 * If result_buf is not NULL, the MCP Trace results are printed to it.
6534 * In any case, the required results buffer size is assigned to
6535 * parsed_results_bytes.
6536 * The parsing status is returned.
6538 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6540 u32 num_dumped_dwords,
6542 u32 *parsed_results_bytes)
6544 u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords;
6545 u32 param_mask, param_shift, param_num_val, num_section_params;
6546 const char *section_name, *param_name, *param_str_val;
6547 u32 offset, results_offset = 0;
6548 struct mcp_trace_meta meta;
6549 struct mcp_trace *trace;
6550 enum dbg_status status;
6551 const u32 *meta_buf;
6554 *parsed_results_bytes = 0;
6556 /* Read global_params section */
6557 dump_buf += qed_read_section_hdr(dump_buf,
6558 §ion_name, &num_section_params);
6559 if (strcmp(section_name, "global_params"))
6560 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6562 /* Print global params */
6563 dump_buf += qed_print_section_params(dump_buf,
6565 results_buf, &results_offset);
6567 /* Read trace_data section */
6568 dump_buf += qed_read_section_hdr(dump_buf,
6569 §ion_name, &num_section_params);
6570 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6571 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6572 dump_buf += qed_read_param(dump_buf,
6573 ¶m_name, ¶m_str_val, ¶m_num_val);
6574 if (strcmp(param_name, "size"))
6575 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6576 trace_data_dwords = param_num_val;
6578 /* Prepare trace info */
6579 trace = (struct mcp_trace *)dump_buf;
6580 trace_buf = (u8 *)dump_buf + sizeof(*trace);
6581 offset = trace->trace_oldest;
6582 end_offset = trace->trace_prod;
6583 bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
6584 dump_buf += trace_data_dwords;
6586 /* Read meta_data section */
6587 dump_buf += qed_read_section_hdr(dump_buf,
6588 §ion_name, &num_section_params);
6589 if (strcmp(section_name, "mcp_trace_meta"))
6590 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6591 dump_buf += qed_read_param(dump_buf,
6592 ¶m_name, ¶m_str_val, ¶m_num_val);
6593 if (strcmp(param_name, "size"))
6594 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6595 trace_meta_dwords = param_num_val;
6597 /* Choose meta data buffer */
6598 if (!trace_meta_dwords) {
6599 /* Dump doesn't include meta data */
6600 if (!s_mcp_trace_meta.ptr)
6601 return DBG_STATUS_MCP_TRACE_NO_META;
6602 meta_buf = s_mcp_trace_meta.ptr;
6604 /* Dump includes meta data */
6605 meta_buf = dump_buf;
6608 /* Allocate meta data memory */
6609 status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
6610 if (status != DBG_STATUS_OK)
6613 /* Ignore the level and module masks - just print everything that is
6614 * already in the buffer.
6616 while (bytes_left) {
6617 struct mcp_trace_format *format_ptr;
6618 u8 format_level, format_module;
6619 u32 params[3] = { 0, 0, 0 };
6620 u32 header, format_idx, i;
6622 if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
6623 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6627 header = qed_read_from_cyclic_buf(trace_buf,
6630 MFW_TRACE_ENTRY_SIZE);
6631 bytes_left -= MFW_TRACE_ENTRY_SIZE;
6632 format_idx = header & MFW_TRACE_EVENTID_MASK;
6634 /* Skip message if its index doesn't exist in the meta data */
6635 if (format_idx >= meta.formats_num) {
6638 MFW_TRACE_PRM_SIZE_MASK) >>
6639 MFW_TRACE_PRM_SIZE_SHIFT);
6641 if (bytes_left < format_size) {
6642 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6646 offset = qed_cyclic_add(offset,
6647 format_size, trace->size);
6648 bytes_left -= format_size;
6652 format_ptr = &meta.formats[format_idx];
6655 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6656 MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6657 i < MCP_TRACE_FORMAT_MAX_PARAMS;
6658 i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6659 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6660 /* Extract param size (0..3) */
6662 (u8)((format_ptr->data &
6663 param_mask) >> param_shift);
6665 /* If the param size is zero, there are no other
6671 /* Size is encoded using 2 bits, where 3 is used to
6674 if (param_size == 3)
6677 if (bytes_left < param_size) {
6678 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6682 params[i] = qed_read_from_cyclic_buf(trace_buf,
6687 bytes_left -= param_size;
6691 (u8)((format_ptr->data &
6692 MCP_TRACE_FORMAT_LEVEL_MASK) >>
6693 MCP_TRACE_FORMAT_LEVEL_SHIFT);
6695 (u8)((format_ptr->data &
6696 MCP_TRACE_FORMAT_MODULE_MASK) >>
6697 MCP_TRACE_FORMAT_MODULE_SHIFT);
6698 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
6699 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6703 /* Print current message to results buffer */
6705 sprintf(qed_get_buf_ptr(results_buf,
6706 results_offset), "%s %-8s: ",
6707 s_mcp_trace_level_str[format_level],
6708 meta.modules[format_module]);
6710 sprintf(qed_get_buf_ptr(results_buf,
6712 format_ptr->format_str, params[0], params[1],
6717 *parsed_results_bytes = results_offset + 1;
6718 qed_mcp_trace_free_meta(p_hwfn, &meta);
6722 /* Parses a Reg FIFO dump buffer.
6723 * If result_buf is not NULL, the Reg FIFO results are printed to it.
6724 * In any case, the required results buffer size is assigned to
6725 * parsed_results_bytes.
6726 * The parsing status is returned.
6728 static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
6730 u32 num_dumped_dwords,
6732 u32 *parsed_results_bytes)
6734 const char *section_name, *param_name, *param_str_val;
6735 u32 param_num_val, num_section_params, num_elements;
6736 struct reg_fifo_element *elements;
6737 u8 i, j, err_val, vf_val;
6738 u32 results_offset = 0;
6741 /* Read global_params section */
6742 dump_buf += qed_read_section_hdr(dump_buf,
6743 §ion_name, &num_section_params);
6744 if (strcmp(section_name, "global_params"))
6745 return DBG_STATUS_REG_FIFO_BAD_DATA;
6747 /* Print global params */
6748 dump_buf += qed_print_section_params(dump_buf,
6750 results_buf, &results_offset);
6752 /* Read reg_fifo_data section */
6753 dump_buf += qed_read_section_hdr(dump_buf,
6754 §ion_name, &num_section_params);
6755 if (strcmp(section_name, "reg_fifo_data"))
6756 return DBG_STATUS_REG_FIFO_BAD_DATA;
6757 dump_buf += qed_read_param(dump_buf,
6758 ¶m_name, ¶m_str_val, ¶m_num_val);
6759 if (strcmp(param_name, "size"))
6760 return DBG_STATUS_REG_FIFO_BAD_DATA;
6761 if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6762 return DBG_STATUS_REG_FIFO_BAD_DATA;
6763 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6764 elements = (struct reg_fifo_element *)dump_buf;
6766 /* Decode elements */
6767 for (i = 0; i < num_elements; i++) {
6768 bool err_printed = false;
6770 /* Discover if element belongs to a VF or a PF */
6771 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6772 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6773 sprintf(vf_str, "%s", "N/A");
6775 sprintf(vf_str, "%d", vf_val);
6777 /* Add parsed element to parsed buffer */
6779 sprintf(qed_get_buf_ptr(results_buf,
6781 "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6783 (u32)GET_FIELD(elements[i].data,
6784 REG_FIFO_ELEMENT_ADDRESS) *
6785 REG_FIFO_ELEMENT_ADDR_FACTOR,
6786 s_access_strs[GET_FIELD(elements[i].data,
6787 REG_FIFO_ELEMENT_ACCESS)],
6788 (u32)GET_FIELD(elements[i].data,
6789 REG_FIFO_ELEMENT_PF),
6791 (u32)GET_FIELD(elements[i].data,
6792 REG_FIFO_ELEMENT_PORT),
6793 s_privilege_strs[GET_FIELD(elements[i].data,
6794 REG_FIFO_ELEMENT_PRIVILEGE)],
6795 s_protection_strs[GET_FIELD(elements[i].data,
6796 REG_FIFO_ELEMENT_PROTECTION)],
6797 s_master_strs[GET_FIELD(elements[i].data,
6798 REG_FIFO_ELEMENT_MASTER)]);
6802 err_val = GET_FIELD(elements[i].data,
6803 REG_FIFO_ELEMENT_ERROR);
6804 j < ARRAY_SIZE(s_reg_fifo_error_strs);
6805 j++, err_val >>= 1) {
6806 if (err_val & 0x1) {
6809 sprintf(qed_get_buf_ptr
6811 results_offset), ", ");
6813 sprintf(qed_get_buf_ptr
6814 (results_buf, results_offset), "%s",
6815 s_reg_fifo_error_strs[j]);
6821 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6824 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6826 "fifo contained %d elements", num_elements);
6828 /* Add 1 for string NULL termination */
6829 *parsed_results_bytes = results_offset + 1;
6831 return DBG_STATUS_OK;
6834 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6837 u32 *results_offset,
6838 u32 *parsed_results_bytes)
6840 const struct igu_fifo_addr_data *found_addr = NULL;
6841 u8 source, err_type, i, is_cleanup;
6842 char parsed_addr_data[32];
6843 char parsed_wr_data[256];
6844 u32 wr_data, prod_cons;
6845 bool is_wr_cmd, is_pf;
6849 /* Dword12 (dword indices 1 and 2) contains bits 32..95 of the
6852 dword12 = ((u64)element->dword2 << 32) | element->dword1;
6853 is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6854 is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6855 cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6856 source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6857 err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6859 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6860 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6861 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6862 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6864 /* Find address data */
6865 for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6866 const struct igu_fifo_addr_data *curr_addr =
6867 &s_igu_fifo_addr_data[i];
6869 if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6870 curr_addr->end_addr)
6871 found_addr = curr_addr;
6875 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6877 /* Prepare parsed address data */
6878 switch (found_addr->type) {
6879 case IGU_ADDR_TYPE_MSIX_MEM:
6880 sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6882 case IGU_ADDR_TYPE_WRITE_INT_ACK:
6883 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6884 sprintf(parsed_addr_data,
6885 " SB = 0x%x", cmd_addr - found_addr->start_addr);
6888 parsed_addr_data[0] = '\0';
6892 parsed_wr_data[0] = '\0';
6896 /* Prepare parsed write data */
6897 wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6898 prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
6899 is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
6901 if (source == IGU_SRC_ATTN) {
6902 sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
} else if (is_cleanup) {
6905 u8 cleanup_val, cleanup_type;
cleanup_val = GET_FIELD(wr_data,
6909 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
cleanup_type = GET_FIELD(wr_data,
6912 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6914 sprintf(parsed_wr_data,
6915 "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
6916 cleanup_val ? "set" : "clear",
cleanup_type);
} else {
6919 u8 update_flag, en_dis_int_for_sb, segment;
u8 timer_mask;
6922 update_flag = GET_FIELD(wr_data,
6923 IGU_FIFO_WR_DATA_UPDATE_FLAG);
en_dis_int_for_sb = GET_FIELD(wr_data,
6926 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6927 segment = GET_FIELD(wr_data,
6928 IGU_FIFO_WR_DATA_SEGMENT);
6929 timer_mask = GET_FIELD(wr_data,
6930 IGU_FIFO_WR_DATA_TIMER_MASK);
6932 sprintf(parsed_wr_data,
6933 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
6935 update_flag ? "update" : "nop",
en_dis_int_for_sb
6937 ? (en_dis_int_for_sb == 1 ? "disable" : "nop")
: "enable",
6939 segment ? "attn" : "regular",
timer_mask);
}
6944 /* Add parsed element to parsed buffer */
6945 *results_offset += sprintf(qed_get_buf_ptr(results_buf,
6947 "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
6948 element->dword2, element->dword1,
6950 is_pf ? "pf" : "vf",
6951 GET_FIELD(element->dword0,
6952 IGU_FIFO_ELEMENT_DWORD0_FID),
6953 s_igu_fifo_source_strs[source],
6954 is_wr_cmd ? "wr" : "rd",
6956 (!is_pf && found_addr->vf_desc)
6957 ? found_addr->vf_desc
6961 s_igu_fifo_error_strs[err_type]);
6963 return DBG_STATUS_OK;
6966 /* Parses an IGU FIFO dump buffer.
6967 * If results_buf is not NULL, the IGU FIFO results are printed to it.
6968 * In any case, the required results buffer size is assigned to
6969 * parsed_results_bytes.
6970 * The parsing status is returned.
*/
6972 static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
6974 u32 num_dumped_dwords,
char *results_buf,
6976 u32 *parsed_results_bytes)
6978 const char *section_name, *param_name, *param_str_val;
6979 u32 param_num_val, num_section_params, num_elements;
6980 struct igu_fifo_element *elements;
6981 enum dbg_status status;
6982 u32 results_offset = 0;
6985 /* Read global_params section */
6986 dump_buf += qed_read_section_hdr(dump_buf,
6987 &section_name, &num_section_params);
6988 if (strcmp(section_name, "global_params"))
6989 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6991 /* Print global params */
6992 dump_buf += qed_print_section_params(dump_buf,
6994 results_buf, &results_offset);
6996 /* Read igu_fifo_data section */
6997 dump_buf += qed_read_section_hdr(dump_buf,
6998 &section_name, &num_section_params);
6999 if (strcmp(section_name, "igu_fifo_data"))
7000 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7001 dump_buf += qed_read_param(dump_buf,
7002 &param_name, &param_str_val, &param_num_val);
7003 if (strcmp(param_name, "size"))
7004 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7005 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7006 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7007 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7008 elements = (struct igu_fifo_element *)dump_buf;
7010 /* Decode elements */
7011 for (i = 0; i < num_elements; i++) {
7012 status = qed_parse_igu_fifo_element(&elements[i],
7015 parsed_results_bytes);
7016 if (status != DBG_STATUS_OK)
7020 results_offset += sprintf(qed_get_buf_ptr(results_buf,
7022 "fifo contained %d elements", num_elements);
7024 /* Add 1 for string NULL termination */
7025 *parsed_results_bytes = results_offset + 1;
7027 return DBG_STATUS_OK;
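/* Typical use of this parser is a two-pass sequence (illustrative sketch,
 * not a verbatim caller from this file): first obtain the required buffer
 * size, then allocate and print into it.
 *
 *	u32 size;
 *	char *buf;
 *
 *	qed_get_igu_fifo_results_buf_size(p_hwfn, dump_buf,
 *					  num_dumped_dwords, &size);
 *	buf = vzalloc(size);
 *	if (buf)
 *		qed_print_igu_fifo_results(p_hwfn, dump_buf,
 *					   num_dumped_dwords, buf);
 */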
7030 static enum dbg_status
7031 qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
7033 u32 num_dumped_dwords,
char *results_buf,
7035 u32 *parsed_results_bytes)
7037 const char *section_name, *param_name, *param_str_val;
7038 u32 param_num_val, num_section_params, num_elements;
7039 struct protection_override_element *elements;
7040 u32 results_offset = 0;
7043 /* Read global_params section */
7044 dump_buf += qed_read_section_hdr(dump_buf,
7045 &section_name, &num_section_params);
7046 if (strcmp(section_name, "global_params"))
7047 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7049 /* Print global params */
7050 dump_buf += qed_print_section_params(dump_buf,
7052 results_buf, &results_offset);
7054 /* Read protection_override_data section */
7055 dump_buf += qed_read_section_hdr(dump_buf,
7056 &section_name, &num_section_params);
7057 if (strcmp(section_name, "protection_override_data"))
7058 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7059 dump_buf += qed_read_param(dump_buf,
7060 &param_name, &param_str_val, &param_num_val);
7061 if (strcmp(param_name, "size"))
7062 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7063 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7064 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7065 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7066 elements = (struct protection_override_element *)dump_buf;
7068 /* Decode elements */
7069 for (i = 0; i < num_elements; i++) {
7070 u32 address = GET_FIELD(elements[i].data,
7071 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7072 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7075 sprintf(qed_get_buf_ptr(results_buf,
7077 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7079 (u32)GET_FIELD(elements[i].data,
7080 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7081 (u32)GET_FIELD(elements[i].data,
7082 PROTECTION_OVERRIDE_ELEMENT_READ),
7083 (u32)GET_FIELD(elements[i].data,
7084 PROTECTION_OVERRIDE_ELEMENT_WRITE),
7085 s_protection_strs[GET_FIELD(elements[i].data,
7086 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7087 s_protection_strs[GET_FIELD(elements[i].data,
7088 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7091 results_offset += sprintf(qed_get_buf_ptr(results_buf,
7093 "protection override contained %d elements",
7096 /* Add 1 for string NULL termination */
7097 *parsed_results_bytes = results_offset + 1;
7099 return DBG_STATUS_OK;
7102 /* Parses a FW Asserts dump buffer.
7103 * If results_buf is not NULL, the FW Asserts results are printed to it.
7104 * In any case, the required results buffer size is assigned to
7105 * parsed_results_bytes.
7106 * The parsing status is returned.
*/
7108 static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
7110 u32 num_dumped_dwords,
char *results_buf,
7112 u32 *parsed_results_bytes)
7114 u32 num_section_params, param_num_val, i, results_offset = 0;
7115 const char *param_name, *param_str_val, *section_name;
7116 bool last_section_found = false;
7118 *parsed_results_bytes = 0;
7120 /* Read global_params section */
7121 dump_buf += qed_read_section_hdr(dump_buf,
7122 &section_name, &num_section_params);
7123 if (strcmp(section_name, "global_params"))
7124 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7126 /* Print global params */
7127 dump_buf += qed_print_section_params(dump_buf,
7129 results_buf, &results_offset);
7131 while (!last_section_found) {
7132 dump_buf += qed_read_section_hdr(dump_buf,
&section_name,
7134 &num_section_params);
7135 if (!strcmp(section_name, "fw_asserts")) {
7136 /* Extract params */
7137 const char *storm_letter = NULL;
7138 u32 storm_dump_size = 0;
7140 for (i = 0; i < num_section_params; i++) {
7141 dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &param_num_val);
7145 if (!strcmp(param_name, "storm"))
7146 storm_letter = param_str_val;
7147 else if (!strcmp(param_name, "size"))
7148 storm_dump_size = param_num_val;
else
return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7154 if (!storm_letter || !storm_dump_size)
7155 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7159 sprintf(qed_get_buf_ptr(results_buf,
7161 "\n%sSTORM_ASSERT: size=%d\n",
7162 storm_letter, storm_dump_size);
7163 for (i = 0; i < storm_dump_size; i++, dump_buf++)
7165 sprintf(qed_get_buf_ptr(results_buf,
7167 "%08x\n", *dump_buf);
7168 } else if (!strcmp(section_name, "last")) {
7169 last_section_found = true;
} else {
7171 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
}
7175 /* Add 1 for string NULL termination */
7176 *parsed_results_bytes = results_offset + 1;
7178 return DBG_STATUS_OK;
7181 /***************************** Public Functions *******************************/
7183 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7185 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
7188 /* Convert binary data to debug arrays */
7189 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7190 s_user_dbg_arrays[buf_id].ptr =
7191 (u32 *)(bin_ptr + buf_array[buf_id].offset);
7192 s_user_dbg_arrays[buf_id].size_in_dwords =
7193 BYTES_TO_DWORDS(buf_array[buf_id].length);
7196 return DBG_STATUS_OK;
7199 const char *qed_dbg_get_status_str(enum dbg_status status)
return (status <
7202 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7205 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7207 u32 num_dumped_dwords,
7208 u32 *results_buf_size)
7210 u32 num_errors, num_warnings;
7212 return qed_parse_idle_chk_dump(p_hwfn,
7217 &num_errors, &num_warnings);
7220 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7222 u32 num_dumped_dwords,
7224 u32 *num_errors, u32 *num_warnings)
7226 u32 parsed_buf_size;
7228 return qed_parse_idle_chk_dump(p_hwfn,
7233 num_errors, num_warnings);
7236 void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
7238 s_mcp_trace_meta.ptr = data;
7239 s_mcp_trace_meta.size_in_dwords = size;
7242 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7244 u32 num_dumped_dwords,
7245 u32 *results_buf_size)
7247 return qed_parse_mcp_trace_dump(p_hwfn,
7250 NULL, results_buf_size);
7253 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7255 u32 num_dumped_dwords,
7258 u32 parsed_buf_size;
7260 return qed_parse_mcp_trace_dump(p_hwfn,
7263 results_buf, &parsed_buf_size);
7266 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7268 u32 num_dumped_dwords,
7269 u32 *results_buf_size)
7271 return qed_parse_reg_fifo_dump(p_hwfn,
7274 NULL, results_buf_size);
7277 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7279 u32 num_dumped_dwords,
7282 u32 parsed_buf_size;
7284 return qed_parse_reg_fifo_dump(p_hwfn,
7287 results_buf, &parsed_buf_size);
7290 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7292 u32 num_dumped_dwords,
7293 u32 *results_buf_size)
7295 return qed_parse_igu_fifo_dump(p_hwfn,
7298 NULL, results_buf_size);
7301 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7303 u32 num_dumped_dwords,
7306 u32 parsed_buf_size;
7308 return qed_parse_igu_fifo_dump(p_hwfn,
7311 results_buf, &parsed_buf_size);
7315 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7317 u32 num_dumped_dwords,
7318 u32 *results_buf_size)
7320 return qed_parse_protection_override_dump(p_hwfn,
7323 NULL, results_buf_size);
7326 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7328 u32 num_dumped_dwords,
7331 u32 parsed_buf_size;
7333 return qed_parse_protection_override_dump(p_hwfn,
7340 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7342 u32 num_dumped_dwords,
7343 u32 *results_buf_size)
7345 return qed_parse_fw_asserts_dump(p_hwfn,
7348 NULL, results_buf_size);
7351 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7353 u32 num_dumped_dwords,
7356 u32 parsed_buf_size;
7358 return qed_parse_fw_asserts_dump(p_hwfn,
7361 results_buf, &parsed_buf_size);
7364 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7365 struct dbg_attn_block_result *results)
7367 struct user_dbg_array *block_attn, *pstrings;
7368 const u32 *block_attn_name_offsets;
7369 enum dbg_attn_type attn_type;
7370 const char *block_name;
u8 num_regs, i, j;
7373 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7374 attn_type = (enum dbg_attn_type)
7375 GET_FIELD(results->data,
7376 DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7377 block_name = s_block_info_arr[results->block_id].name;
7379 if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7380 !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7381 !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7382 return DBG_STATUS_DBG_ARRAY_NOT_SET;
7384 block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7385 block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7387 /* Go over registers with a non-zero attention status */
7388 for (i = 0; i < num_regs; i++) {
7389 struct dbg_attn_reg_result *reg_result;
7390 struct dbg_attn_bit_mapping *mapping;
7391 u8 num_reg_attn, bit_idx = 0;
7393 reg_result = &results->reg_results[i];
7394 num_reg_attn = GET_FIELD(reg_result->data,
7395 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7396 block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7397 mapping = &((struct dbg_attn_bit_mapping *)
7398 block_attn->ptr)[reg_result->block_attn_offset];
7400 pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7402 /* Go over attention status bits */
7403 for (j = 0; j < num_reg_attn; j++) {
7404 u16 attn_idx_val = GET_FIELD(mapping[j].data,
7405 DBG_ATTN_BIT_MAPPING_VAL);
7406 const char *attn_name, *attn_type_str, *masked_str;
7407 u32 name_offset, sts_addr;
7409 /* Check if bit mask should be advanced (due to unused
7412 if (GET_FIELD(mapping[j].data,
7413 DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7414 bit_idx += (u8)attn_idx_val;
continue;
}
7418 /* Check current bit index */
7419 if (!(reg_result->sts_val & BIT(bit_idx))) {
bit_idx++;
continue;
}
7424 /* Find attention name */
7425 name_offset = block_attn_name_offsets[attn_idx_val];
7426 attn_name = &((const char *)
7427 pstrings->ptr)[name_offset];
7428 attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7429 "Interrupt" : "Parity";
7430 masked_str = reg_result->mask_val & BIT(bit_idx) ?
7432 sts_addr = GET_FIELD(reg_result->data,
7433 DBG_ATTN_REG_RESULT_STS_ADDRESS);
7435 "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7436 block_name, attn_type_str, attn_name,
7437 sts_addr, bit_idx, masked_str);

bit_idx++;
}
}
7443 return DBG_STATUS_OK;
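/* A line emitted by the loop above has the form (names here are
 * hypothetical; the real strings come from the parsing-strings debug array):
 *
 *	BRB (Parity) : mem_ecc_err [address 0x00340040, bit 3]
 *
 * with an extra suffix appended when the corresponding mask bit is set.
 */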
7446 /* Wrapper for unifying the idle_chk and mcp_trace api */
7447 static enum dbg_status
7448 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
7450 u32 num_dumped_dwords,
char *results_buf)
7453 u32 num_errors, num_warnings;

7455 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7456 results_buf, &num_errors,
&num_warnings);
7460 /* Feature meta data lookup table */
static struct {
const char *name;
7463 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7464 struct qed_ptt *p_ptt, u32 *size);
7465 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7466 struct qed_ptt *p_ptt, u32 *dump_buf,
7467 u32 buf_size, u32 *dumped_dwords);
7468 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7469 u32 *dump_buf, u32 num_dumped_dwords,
7471 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7473 u32 num_dumped_dwords,
7474 u32 *results_buf_size);
7475 } qed_features_lookup[] = {
7477 "grc", qed_dbg_grc_get_dump_buf_size,
7478 qed_dbg_grc_dump, NULL, NULL}, {
"idle_chk",
7480 qed_dbg_idle_chk_get_dump_buf_size,
7481 qed_dbg_idle_chk_dump,
7482 qed_print_idle_chk_results_wrapper,
7483 qed_get_idle_chk_results_buf_size}, {
"mcp_trace",
7485 qed_dbg_mcp_trace_get_dump_buf_size,
7486 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7487 qed_get_mcp_trace_results_buf_size}, {
"reg_fifo",
7489 qed_dbg_reg_fifo_get_dump_buf_size,
7490 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7491 qed_get_reg_fifo_results_buf_size}, {
"igu_fifo",
7493 qed_dbg_igu_fifo_get_dump_buf_size,
7494 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7495 qed_get_igu_fifo_results_buf_size}, {
7496 "protection_override",
7497 qed_dbg_protection_override_get_dump_buf_size,
7498 qed_dbg_protection_override_dump,
7499 qed_print_protection_override_results,
7500 qed_get_protection_override_results_buf_size}, {
"fw_asserts",
7502 qed_dbg_fw_asserts_get_dump_buf_size,
7503 qed_dbg_fw_asserts_dump,
7504 qed_print_fw_asserts_results,
7505 qed_get_fw_asserts_results_buf_size},};
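/* Note: entries whose print_results/results_buf_size callbacks are NULL
 * (currently only "grc") cannot be rendered as text; format_feature() below
 * detects this and leaves the dump as raw binary data.
 */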
7507 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7509 u32 i, precision = 80;
7514 pr_notice("\n%.*s", precision, p_text_buf);
7515 for (i = precision; i < text_size; i += precision)
7516 pr_cont("%.*s", precision, p_text_buf + i);
7520 #define QED_RESULTS_BUF_MIN_SIZE 16
7521 /* Generic function for decoding debug feature info */
7522 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7523 enum qed_dbg_features feature_idx)
7525 struct qed_dbg_feature *feature =
7526 &p_hwfn->cdev->dbg_params.features[feature_idx];
7527 u32 text_size_bytes, null_char_pos, i;
enum dbg_status rc;
char *text_buf;
7531 /* Check if feature supports formatting capability */
7532 if (!qed_features_lookup[feature_idx].results_buf_size)
7533 return DBG_STATUS_OK;
7535 /* Obtain size of formatted output */
7536 rc = qed_features_lookup[feature_idx].
7537 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7538 feature->dumped_dwords, &text_size_bytes);
7539 if (rc != DBG_STATUS_OK)
return rc;
7542 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
7543 null_char_pos = text_size_bytes - 1;
7544 text_size_bytes = (text_size_bytes + 3) & ~0x3;
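/* Example (illustrative numbers): a 50-byte formatted result gives
 * null_char_pos = 49 and text_size_bytes rounded up to 52, so bytes 49..51
 * become the '\n' padding added further below.
 */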
7546 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7547 DP_NOTICE(p_hwfn->cdev,
7548 "formatted size of feature was too small %d. Aborting\n",
7550 return DBG_STATUS_INVALID_ARGS;
7553 /* Allocate temp text buf */
7554 text_buf = vzalloc(text_size_bytes);
if (!text_buf)
7556 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7558 /* Decode feature opcodes to string on temp buf */
7559 rc = qed_features_lookup[feature_idx].
7560 print_results(p_hwfn, (u32 *)feature->dump_buf,
7561 feature->dumped_dwords, text_buf);
7562 if (rc != DBG_STATUS_OK) {
vfree(text_buf);
return rc;
}
7567 /* Replace the original null character with a '\n' character.
7568 * The bytes that were added as a result of the dword alignment are also
7569 * padded with '\n' characters.
7571 for (i = null_char_pos; i < text_size_bytes; i++)
text_buf[i] = '\n';
7574 /* Dump printable feature to log */
7575 if (p_hwfn->cdev->dbg_params.print_data)
7576 qed_dbg_print_feature(text_buf, text_size_bytes);
7578 /* Free the old dump_buf and point the dump_buf to the newly allocated
7579 * and formatted text buffer.
*/
7581 vfree(feature->dump_buf);
7582 feature->dump_buf = text_buf;
7583 feature->buf_size = text_size_bytes;
7584 feature->dumped_dwords = text_size_bytes / 4;

return rc;
7588 /* Generic function for performing the dump of a debug feature. */
7589 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7590 struct qed_ptt *p_ptt,
7591 enum qed_dbg_features feature_idx)
7593 struct qed_dbg_feature *feature =
7594 &p_hwfn->cdev->dbg_params.features[feature_idx];
7595 u32 buf_size_dwords;
enum dbg_status rc;
7598 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7599 qed_features_lookup[feature_idx].name);
7601 /* If dump_buf was already allocated, it must be freed first (this can
7602 * happen if a dump was requested but the debugfs file was never read).
7603 * The old buffer can't be reused as-is since its size may have changed.
*/
7605 if (feature->dump_buf) {
7606 vfree(feature->dump_buf);
7607 feature->dump_buf = NULL;
7610 /* Get buffer size from HSI, allocate accordingly, and perform the
* actual dump.
*/
7613 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
&buf_size_dwords);
7615 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return rc;
7617 feature->buf_size = buf_size_dwords * sizeof(u32);
7618 feature->dump_buf = vmalloc(feature->buf_size);
7619 if (!feature->dump_buf)
7620 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7622 rc = qed_features_lookup[feature_idx].
7623 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7624 feature->buf_size / sizeof(u32),
7625 &feature->dumped_dwords);
7627 /* If the MCP is stuck we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7628 * In this case the buffer holds valid binary data, but we won't be able
7629 * to parse it (since parsing relies on data in NVRAM which is only
7630 * accessible when the MFW is responsive). Skip the formatting but return
7631 * success so that the binary data is provided.
*/
7633 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7634 return DBG_STATUS_OK;
7636 if (rc != DBG_STATUS_OK)
return rc;
7640 rc = format_feature(p_hwfn, feature_idx);
7644 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7646 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7649 int qed_dbg_grc_size(struct qed_dev *cdev)
7651 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7654 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7656 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7660 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7662 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7665 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7667 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7671 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7673 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7676 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7678 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7682 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7684 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7687 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7688 u32 *num_dumped_bytes)
7690 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7694 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7696 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7699 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7700 u32 *num_dumped_bytes)
7702 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7706 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7708 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7711 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7712 u32 *num_dumped_bytes)
7714 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7718 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7720 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
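/* The qed_dbg_<feature>() / qed_dbg_<feature>_size() pairs above are the
 * building blocks used below: the _size() call reports how many bytes the
 * caller must provide, and the dump call then fills a buffer of at least
 * that size (see qed_dbg_all_data_size() and qed_dbg_all_data()).
 */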
7723 /* Defines the number of bytes allocated for recording the length of a
* debugfs feature buffer.
*/
7726 #define REGDUMP_HEADER_SIZE sizeof(u32)
7727 #define REGDUMP_HEADER_FEATURE_SHIFT 24
7728 #define REGDUMP_HEADER_ENGINE_SHIFT 31
7729 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
7730 enum debug_print_features {
7736 PROTECTION_OVERRIDE = 5,
7742 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7743 int engine, u32 feature_size, u8 omit_engine)
7745 /* Insert the engine, feature and mode inside the header and combine it
7746 * with feature size.
7748 return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7749 (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7750 (engine << REGDUMP_HEADER_ENGINE_SHIFT);
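/* Example (illustrative values): feature 3 on engine 0 with omit_engine = 1
 * and a 0x1000-byte payload packs into
 * 0x1000 | (3 << 24) | (1 << 30) | (0 << 31) = 0x43001000.
 */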
7753 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7755 u8 cur_engine, omit_engine = 0, org_engine;
7756 u32 offset = 0, feature_size;
int rc;

7759 if (cdev->num_hwfns == 1)
omit_engine = 1;
7762 org_engine = qed_get_debug_engine(cdev);
7763 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7764 /* Collect idle_chks and grcDump for each hw function */
7765 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7766 "obtaining idle_chk and grcdump for current engine\n");
7767 qed_set_debug_engine(cdev, cur_engine);
7769 /* First idle_chk */
7770 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7771 REGDUMP_HEADER_SIZE, &feature_size);
7773 *(u32 *)((u8 *)buffer + offset) =
7774 qed_calc_regdump_header(IDLE_CHK, cur_engine,
7775 feature_size, omit_engine);
7776 offset += (feature_size + REGDUMP_HEADER_SIZE);
7778 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7781 /* Second idle_chk */
7782 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7783 REGDUMP_HEADER_SIZE, &feature_size);
7785 *(u32 *)((u8 *)buffer + offset) =
7786 qed_calc_regdump_header(IDLE_CHK, cur_engine,
7787 feature_size, omit_engine);
7788 offset += (feature_size + REGDUMP_HEADER_SIZE);
7790 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7794 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7795 REGDUMP_HEADER_SIZE, &feature_size);
7797 *(u32 *)((u8 *)buffer + offset) =
7798 qed_calc_regdump_header(REG_FIFO, cur_engine,
7799 feature_size, omit_engine);
7800 offset += (feature_size + REGDUMP_HEADER_SIZE);
7802 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7806 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7807 REGDUMP_HEADER_SIZE, &feature_size);
7809 *(u32 *)((u8 *)buffer + offset) =
7810 qed_calc_regdump_header(IGU_FIFO, cur_engine,
7811 feature_size, omit_engine);
7812 offset += (feature_size + REGDUMP_HEADER_SIZE);
7814 DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
7817 /* protection_override dump */
7818 rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7819 REGDUMP_HEADER_SIZE,
7822 *(u32 *)((u8 *)buffer + offset) =
7823 qed_calc_regdump_header(PROTECTION_OVERRIDE,
7825 feature_size, omit_engine);
7826 offset += (feature_size + REGDUMP_HEADER_SIZE);
7829 "qed_dbg_protection_override failed. rc = %d\n",
7833 /* fw_asserts dump */
7834 rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7835 REGDUMP_HEADER_SIZE, &feature_size);
7837 *(u32 *)((u8 *)buffer + offset) =
7838 qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7839 feature_size, omit_engine);
7840 offset += (feature_size + REGDUMP_HEADER_SIZE);
7842 DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
rc);
7846 /* GRC dump - must be last because when mcp stuck it will
7847 * clutter idle_chk, reg_fifo, ...
7849 rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7850 REGDUMP_HEADER_SIZE, &feature_size);
7852 *(u32 *)((u8 *)buffer + offset) =
7853 qed_calc_regdump_header(GRC_DUMP, cur_engine,
7854 feature_size, omit_engine);
7855 offset += (feature_size + REGDUMP_HEADER_SIZE);
7857 DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
7862 rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7863 REGDUMP_HEADER_SIZE, &feature_size);
7865 *(u32 *)((u8 *)buffer + offset) =
7866 qed_calc_regdump_header(MCP_TRACE, cur_engine,
7867 feature_size, omit_engine);
7868 offset += (feature_size + REGDUMP_HEADER_SIZE);
7870 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
7873 qed_set_debug_engine(cdev, org_engine);
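/* The buffer produced above is a flat sequence of (header, payload) pairs:
 *
 *	[u32 header][idle_chk data][u32 header][idle_chk data]
 *	[u32 header][reg_fifo data] ... [u32 header][mcp_trace data]
 *
 * where each header is packed by qed_calc_regdump_header() and encodes the
 * payload size, feature id, engine and omit-engine flag.
 */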
7878 int qed_dbg_all_data_size(struct qed_dev *cdev)
7880 u8 cur_engine, org_engine;
u32 regs_len = 0;
7883 org_engine = qed_get_debug_engine(cdev);
7884 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7885 /* Engine specific */
7886 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7887 "calculating idle_chk and grcdump register length for current engine\n");
7888 qed_set_debug_engine(cdev, cur_engine);
7889 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7890 REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7891 REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
7892 REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
7893 REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
7894 REGDUMP_HEADER_SIZE +
7895 qed_dbg_protection_override_size(cdev) +
7896 REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
7900 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
7901 qed_set_debug_engine(cdev, org_engine);
7906 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
7907 enum qed_dbg_features feature, u32 *num_dumped_bytes)
7909 struct qed_hwfn *p_hwfn =
7910 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
7911 struct qed_dbg_feature *qed_feature =
7912 &cdev->dbg_params.features[feature];
7913 enum dbg_status dbg_rc;
7914 struct qed_ptt *p_ptt;
7918 p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EINVAL;
7923 dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
7924 if (dbg_rc != DBG_STATUS_OK) {
7925 DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
7926 qed_dbg_get_status_str(dbg_rc));
7927 *num_dumped_bytes = 0;
7932 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7933 "copying debugfs feature to external buffer\n");
7934 memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
7935 *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
4;
7939 qed_ptt_release(p_hwfn, p_ptt);
7943 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
7945 struct qed_hwfn *p_hwfn =
7946 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
7947 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
7948 struct qed_dbg_feature *qed_feature =
7949 &cdev->dbg_params.features[feature];
7950 u32 buf_size_dwords;
enum dbg_status rc;
7956 rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
&buf_size_dwords);
7958 if (rc != DBG_STATUS_OK)
7959 buf_size_dwords = 0;
7961 qed_ptt_release(p_hwfn, p_ptt);
7962 qed_feature->buf_size = buf_size_dwords * sizeof(u32);
7963 return qed_feature->buf_size;
7966 u8 qed_get_debug_engine(struct qed_dev *cdev)
7968 return cdev->dbg_params.engine_for_debug;
7971 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
7973 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
engine_number);
7975 cdev->dbg_params.engine_for_debug = engine_number;
7978 void qed_dbg_pf_init(struct qed_dev *cdev)
7980 const u8 *dbg_values;
7982 /* Debug values are after init values.
7983 * The offset is the first dword of the file.
7985 dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
7986 qed_dbg_set_bin_ptr((u8 *)dbg_values);
7987 qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
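/* For example, if the first dword of the firmware image is 0x4000 (a purely
 * illustrative value), dbg_values points at cdev->firmware->data + 0x4000,
 * i.e. just past the init values.
 */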
7990 void qed_dbg_pf_exit(struct qed_dev *cdev)
7992 struct qed_dbg_feature *feature = NULL;
7993 enum qed_dbg_features feature_idx;
7995 /* Debug features' buffers may be allocated if debug feature was used
7996 * but dump wasn't called.
7998 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
7999 feature = &cdev->dbg_params.features[feature_idx];
8000 if (feature->dump_buf) {
8001 vfree(feature->dump_buf);
8002 feature->dump_buf = NULL;