1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
16 #include "qed_reg_addr.h"
25 /* Memory groups enum */
43 MEM_GROUP_CONN_CFC_MEM,
44 MEM_GROUP_TASK_CFC_MEM,
58 /* Memory groups names */
59 static const char * const s_mem_group_names[] = {
90 /* Idle check conditions */
/* Idle check condition: true when both masked register values differ
 * from their expected immediates.
 */
static u32 cond4(const u32 *r, const u32 *imm)
{
	u32 first_mismatch = (r[0] & imm[0]) != imm[1];
	u32 second_mismatch = (r[1] & imm[2]) != imm[3];

	return first_mismatch && second_mismatch;
}
/* Idle check condition: true when the bit field extracted from r[0]
 * (shift imm[0], mask imm[1]) differs from imm[2].
 */
static u32 cond6(const u32 *r, const u32 *imm)
{
	u32 field = (r[0] >> imm[0]) & imm[1];

	return field != imm[2];
}
/* Idle check condition: true when the masked register value differs
 * from the expected immediate.
 */
static u32 cond5(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked != imm[1];
}
/* Idle check condition: compares a field of r[0] against a value
 * assembled from a low part of r[0] and a high part of r[1].
 */
static u32 cond8(const u32 *r, const u32 *imm)
{
	u32 lhs = (r[0] & imm[0]) >> imm[1];
	u32 low_part = (r[0] & imm[2]) >> imm[3];
	u32 high_part = (r[1] & imm[4]) << imm[5];

	return lhs != (low_part | high_part);
}
/* Idle check condition: true when a shifted field of r[0] differs
 * from another masked view of the same register.
 */
static u32 cond9(const u32 *r, const u32 *imm)
{
	u32 shifted_field = (r[0] & imm[0]) >> imm[1];
	u32 masked_val = r[0] & imm[2];

	return shifted_field != masked_val;
}
/* Idle check condition: true when the bits of r[0] outside the mask
 * imm[0] differ from the expected immediate imm[1].
 */
static u32 cond1(const u32 *r, const u32 *imm)
{
	u32 outside_mask = r[0] & ~imm[0];

	return outside_mask != imm[1];
}
/* Idle check condition: true when the register value is not equal to
 * the expected immediate.
 */
static u32 cond0(const u32 *r, const u32 *imm)
{
	return !(r[0] == imm[0]);
}
/* Idle check condition: true when r[0] and r[1] disagree while r[2]
 * equals the expected immediate.
 */
static u32 cond10(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] == imm[0];
}
/* Idle check condition: true when r[0] and r[1] disagree while r[2]
 * exceeds the immediate threshold.
 */
static u32 cond11(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] > imm[0];
}
137 static u32 cond3(const u32 *r, const u32 *imm)
/* Idle check condition: returns the register bits selected by the
 * immediate mask (non-zero means the condition holds).
 */
static u32 cond12(const u32 *r, const u32 *imm)
{
	u32 selected_bits = r[0] & imm[0];

	return selected_bits;
}
/* Idle check condition: true when r[0] is below r[1] reduced by the
 * immediate margin.
 */
static u32 cond7(const u32 *r, const u32 *imm)
{
	u32 threshold = r[1] - imm[0];

	return r[0] < threshold;
}
/* Idle check condition: true when the register value exceeds the
 * immediate threshold.
 */
static u32 cond2(const u32 *r, const u32 *imm)
{
	return imm[0] < r[0];
}
157 /* Array of Idle Check conditions */
158 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
174 /******************************* Data Types **********************************/
189 struct chip_platform_defs {
195 /* Chip constant definitions */
198 struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
201 /* Platform constant definitions */
202 struct platform_defs {
207 /* Storm constant definitions */
210 enum block_id block_id;
211 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
213 u32 sem_fast_mem_addr;
214 u32 sem_frame_mode_addr;
215 u32 sem_slow_enable_addr;
216 u32 sem_slow_mode_addr;
217 u32 sem_slow_mode1_conf_addr;
218 u32 sem_sync_dbg_empty_addr;
219 u32 sem_slow_dbg_empty_addr;
221 u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
222 u32 cm_conn_ag_ctx_rd_addr;
223 u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
224 u32 cm_conn_st_ctx_rd_addr;
225 u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
226 u32 cm_task_ag_ctx_rd_addr;
227 u32 cm_task_st_ctx_lid_size; /* In quad-regs */
228 u32 cm_task_st_ctx_rd_addr;
231 /* Block constant definitions */
234 bool has_dbg_bus[MAX_CHIP_IDS];
235 bool associated_to_storm;
236 u32 storm_id; /* Valid only if associated_to_storm is true */
237 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
239 u32 dbg_cycle_enable_addr;
241 u32 dbg_force_valid_addr;
242 u32 dbg_force_frame_addr;
244 bool unreset; /* If true, the block is taken out of reset before dump */
245 enum dbg_reset_regs reset_reg;
246 u8 reset_bit_offset; /* Bit offset in reset register */
249 /* Reset register definitions */
250 struct reset_reg_defs {
253 bool exists[MAX_CHIP_IDS];
256 struct grc_param_defs {
257 u32 default_val[MAX_CHIP_IDS];
261 u32 exclude_all_preset_val;
262 u32 crash_preset_val;
265 struct rss_mem_defs {
266 const char *mem_name;
267 const char *type_name;
268 u32 addr; /* In 128b units */
269 u32 num_entries[MAX_CHIP_IDS];
270 u32 entry_width[MAX_CHIP_IDS]; /* In bits */
273 struct vfc_ram_defs {
274 const char *mem_name;
275 const char *type_name;
280 struct big_ram_defs {
281 const char *instance_name;
282 enum mem_groups mem_group_id;
283 enum mem_groups ram_mem_group_id;
284 enum dbg_grc_params grc_param;
287 u32 num_of_blocks[MAX_CHIP_IDS];
291 const char *phy_name;
293 u32 tbus_addr_lo_addr;
294 u32 tbus_addr_hi_addr;
295 u32 tbus_data_lo_addr;
296 u32 tbus_data_hi_addr;
299 /******************************** Constants **********************************/
301 #define MAX_LCIDS 320
302 #define MAX_LTIDS 320
303 #define NUM_IOR_SETS 2
304 #define IORS_PER_SET 176
305 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
306 #define BYTES_IN_DWORD sizeof(u32)
308 /* In the macros below, size and offset are specified in bits */
309 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
310 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
311 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
312 #define FIELD_DWORD_OFFSET(type, field) \
313 (int)(FIELD_BIT_OFFSET(type, field) / 32)
314 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
315 #define FIELD_BIT_MASK(type, field) \
316 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
317 FIELD_DWORD_SHIFT(type, field))
318 #define SET_VAR_FIELD(var, type, field, val) \
320 var[FIELD_DWORD_OFFSET(type, field)] &= \
321 (~FIELD_BIT_MASK(type, field)); \
322 var[FIELD_DWORD_OFFSET(type, field)] |= \
323 (val) << FIELD_DWORD_SHIFT(type, field); \
325 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
327 for (i = 0; i < (arr_size); i++) \
328 qed_wr(dev, ptt, addr, (arr)[i]); \
330 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
332 for (i = 0; i < (arr_size); i++) \
333 (arr)[i] = qed_rd(dev, ptt, addr); \
336 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
337 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
338 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
339 #define RAM_LINES_TO_BYTES(lines) \
340 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
341 #define REG_DUMP_LEN_SHIFT 24
342 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
343 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
344 #define IDLE_CHK_RULE_SIZE_DWORDS \
345 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
346 #define IDLE_CHK_RESULT_HDR_DWORDS \
347 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
348 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
349 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
350 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
352 /* The sizes and offsets below are specified in bits */
353 #define VFC_CAM_CMD_STRUCT_SIZE 64
354 #define VFC_CAM_CMD_ROW_OFFSET 48
355 #define VFC_CAM_CMD_ROW_SIZE 9
356 #define VFC_CAM_ADDR_STRUCT_SIZE 16
357 #define VFC_CAM_ADDR_OP_OFFSET 0
358 #define VFC_CAM_ADDR_OP_SIZE 4
359 #define VFC_CAM_RESP_STRUCT_SIZE 256
360 #define VFC_RAM_ADDR_STRUCT_SIZE 16
361 #define VFC_RAM_ADDR_OP_OFFSET 0
362 #define VFC_RAM_ADDR_OP_SIZE 2
363 #define VFC_RAM_ADDR_ROW_OFFSET 2
364 #define VFC_RAM_ADDR_ROW_SIZE 10
365 #define VFC_RAM_RESP_STRUCT_SIZE 256
366 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
367 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
368 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
369 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
370 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
371 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
372 #define NUM_VFC_RAM_TYPES 4
373 #define VFC_CAM_NUM_ROWS 512
374 #define VFC_OPCODE_CAM_RD 14
375 #define VFC_OPCODE_RAM_RD 0
376 #define NUM_RSS_MEM_TYPES 5
377 #define NUM_BIG_RAM_TYPES 3
378 #define BIG_RAM_BLOCK_SIZE_BYTES 128
379 #define BIG_RAM_BLOCK_SIZE_DWORDS \
380 BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
381 #define NUM_PHY_TBUS_ADDRESSES 2048
382 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
383 #define RESET_REG_UNRESET_OFFSET 4
384 #define STALL_DELAY_MS 500
385 #define STATIC_DEBUG_LINE_DWORDS 9
386 #define NUM_DBG_BUS_LINES 256
387 #define NUM_COMMON_GLOBAL_PARAMS 8
388 #define FW_IMG_MAIN 1
389 #define REG_FIFO_DEPTH_ELEMENTS 32
390 #define REG_FIFO_ELEMENT_DWORDS 2
391 #define REG_FIFO_DEPTH_DWORDS \
392 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
393 #define IGU_FIFO_DEPTH_ELEMENTS 64
394 #define IGU_FIFO_ELEMENT_DWORDS 4
395 #define IGU_FIFO_DEPTH_DWORDS \
396 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
397 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
398 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
399 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
400 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
401 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
402 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
404 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
405 #define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
406 #define EMPTY_FW_VERSION_STR "???_???_???_???"
407 #define EMPTY_FW_IMAGE_STR "???????????????"
409 /***************************** Constant Arrays *******************************/
412 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
414 /* Chip constant definitions array */
415 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
417 { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
418 {0, 0, 0}, {0, 0, 0} } },
420 { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
421 {0, 0, 0}, {0, 0, 0} } }
424 /* Storm constant definitions array */
425 static struct storm_defs s_storm_defs[] = {
428 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
429 TSEM_REG_FAST_MEMORY,
430 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
431 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
432 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
433 TCM_REG_CTX_RBC_ACCS,
434 4, TCM_REG_AGG_CON_CTX,
435 16, TCM_REG_SM_CON_CTX,
436 2, TCM_REG_AGG_TASK_CTX,
437 4, TCM_REG_SM_TASK_CTX},
440 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
441 MSEM_REG_FAST_MEMORY,
442 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
443 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
444 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
445 MCM_REG_CTX_RBC_ACCS,
446 1, MCM_REG_AGG_CON_CTX,
447 10, MCM_REG_SM_CON_CTX,
448 2, MCM_REG_AGG_TASK_CTX,
449 7, MCM_REG_SM_TASK_CTX},
452 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
453 USEM_REG_FAST_MEMORY,
454 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
455 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
456 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
457 UCM_REG_CTX_RBC_ACCS,
458 2, UCM_REG_AGG_CON_CTX,
459 13, UCM_REG_SM_CON_CTX,
460 3, UCM_REG_AGG_TASK_CTX,
461 3, UCM_REG_SM_TASK_CTX},
464 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
465 XSEM_REG_FAST_MEMORY,
466 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
467 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
468 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
469 XCM_REG_CTX_RBC_ACCS,
470 9, XCM_REG_AGG_CON_CTX,
471 15, XCM_REG_SM_CON_CTX,
476 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
477 YSEM_REG_FAST_MEMORY,
478 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
479 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
480 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
481 YCM_REG_CTX_RBC_ACCS,
482 2, YCM_REG_AGG_CON_CTX,
483 3, YCM_REG_SM_CON_CTX,
484 2, YCM_REG_AGG_TASK_CTX,
485 12, YCM_REG_SM_TASK_CTX},
488 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
489 PSEM_REG_FAST_MEMORY,
490 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
491 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
492 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
493 PCM_REG_CTX_RBC_ACCS,
495 10, PCM_REG_SM_CON_CTX,
500 /* Block definitions array */
501 static struct block_defs block_grc_defs = {
503 {true, true}, false, 0,
504 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
505 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
506 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
507 GRC_REG_DBG_FORCE_FRAME,
508 true, false, DBG_RESET_REG_MISC_PL_UA, 1
511 static struct block_defs block_miscs_defs = {
512 "miscs", {false, false}, false, 0,
513 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
515 false, false, MAX_DBG_RESET_REGS, 0
518 static struct block_defs block_misc_defs = {
519 "misc", {false, false}, false, 0,
520 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
522 false, false, MAX_DBG_RESET_REGS, 0
525 static struct block_defs block_dbu_defs = {
526 "dbu", {false, false}, false, 0,
527 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
529 false, false, MAX_DBG_RESET_REGS, 0
532 static struct block_defs block_pglue_b_defs = {
534 {true, true}, false, 0,
535 {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
536 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
537 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
538 PGLUE_B_REG_DBG_FORCE_FRAME,
539 true, false, DBG_RESET_REG_MISCS_PL_HV, 1
542 static struct block_defs block_cnig_defs = {
544 {false, true}, false, 0,
545 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
546 CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
547 CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
548 CNIG_REG_DBG_FORCE_FRAME_K2,
549 true, false, DBG_RESET_REG_MISCS_PL_HV, 0
552 static struct block_defs block_cpmu_defs = {
553 "cpmu", {false, false}, false, 0,
554 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
556 true, false, DBG_RESET_REG_MISCS_PL_HV, 8
559 static struct block_defs block_ncsi_defs = {
561 {true, true}, false, 0,
562 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
563 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
564 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
565 NCSI_REG_DBG_FORCE_FRAME,
566 true, false, DBG_RESET_REG_MISCS_PL_HV, 5
569 static struct block_defs block_opte_defs = {
570 "opte", {false, false}, false, 0,
571 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
573 true, false, DBG_RESET_REG_MISCS_PL_HV, 4
576 static struct block_defs block_bmb_defs = {
578 {true, true}, false, 0,
579 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
580 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
581 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
582 BMB_REG_DBG_FORCE_FRAME,
583 true, false, DBG_RESET_REG_MISCS_PL_UA, 7
586 static struct block_defs block_pcie_defs = {
588 {false, true}, false, 0,
589 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
590 PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
591 PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
592 PCIE_REG_DBG_COMMON_FORCE_FRAME,
593 false, false, MAX_DBG_RESET_REGS, 0
596 static struct block_defs block_mcp_defs = {
597 "mcp", {false, false}, false, 0,
598 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
600 false, false, MAX_DBG_RESET_REGS, 0
603 static struct block_defs block_mcp2_defs = {
605 {true, true}, false, 0,
606 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
607 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
608 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
609 MCP2_REG_DBG_FORCE_FRAME,
610 false, false, MAX_DBG_RESET_REGS, 0
613 static struct block_defs block_pswhst_defs = {
615 {true, true}, false, 0,
616 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
617 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
618 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
619 PSWHST_REG_DBG_FORCE_FRAME,
620 true, false, DBG_RESET_REG_MISC_PL_HV, 0
623 static struct block_defs block_pswhst2_defs = {
625 {true, true}, false, 0,
626 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
627 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
628 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
629 PSWHST2_REG_DBG_FORCE_FRAME,
630 true, false, DBG_RESET_REG_MISC_PL_HV, 0
633 static struct block_defs block_pswrd_defs = {
635 {true, true}, false, 0,
636 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
637 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
638 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
639 PSWRD_REG_DBG_FORCE_FRAME,
640 true, false, DBG_RESET_REG_MISC_PL_HV, 2
643 static struct block_defs block_pswrd2_defs = {
645 {true, true}, false, 0,
646 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
647 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
648 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
649 PSWRD2_REG_DBG_FORCE_FRAME,
650 true, false, DBG_RESET_REG_MISC_PL_HV, 2
653 static struct block_defs block_pswwr_defs = {
655 {true, true}, false, 0,
656 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
657 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
658 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
659 PSWWR_REG_DBG_FORCE_FRAME,
660 true, false, DBG_RESET_REG_MISC_PL_HV, 3
663 static struct block_defs block_pswwr2_defs = {
664 "pswwr2", {false, false}, false, 0,
665 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
667 true, false, DBG_RESET_REG_MISC_PL_HV, 3
670 static struct block_defs block_pswrq_defs = {
672 {true, true}, false, 0,
673 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
674 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
675 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
676 PSWRQ_REG_DBG_FORCE_FRAME,
677 true, false, DBG_RESET_REG_MISC_PL_HV, 1
680 static struct block_defs block_pswrq2_defs = {
682 {true, true}, false, 0,
683 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
684 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
685 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
686 PSWRQ2_REG_DBG_FORCE_FRAME,
687 true, false, DBG_RESET_REG_MISC_PL_HV, 1
690 static struct block_defs block_pglcs_defs = {
692 {false, true}, false, 0,
693 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
694 PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
695 PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
696 PGLCS_REG_DBG_FORCE_FRAME,
697 true, false, DBG_RESET_REG_MISCS_PL_HV, 2
700 static struct block_defs block_ptu_defs = {
702 {true, true}, false, 0,
703 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
704 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
705 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
706 PTU_REG_DBG_FORCE_FRAME,
707 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
710 static struct block_defs block_dmae_defs = {
712 {true, true}, false, 0,
713 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
714 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
715 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
716 DMAE_REG_DBG_FORCE_FRAME,
717 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
720 static struct block_defs block_tcm_defs = {
722 {true, true}, true, DBG_TSTORM_ID,
723 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
724 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
725 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
726 TCM_REG_DBG_FORCE_FRAME,
727 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
730 static struct block_defs block_mcm_defs = {
732 {true, true}, true, DBG_MSTORM_ID,
733 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
734 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
735 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
736 MCM_REG_DBG_FORCE_FRAME,
737 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
740 static struct block_defs block_ucm_defs = {
742 {true, true}, true, DBG_USTORM_ID,
743 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
744 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
745 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
746 UCM_REG_DBG_FORCE_FRAME,
747 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
750 static struct block_defs block_xcm_defs = {
752 {true, true}, true, DBG_XSTORM_ID,
753 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
754 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
755 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
756 XCM_REG_DBG_FORCE_FRAME,
757 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
760 static struct block_defs block_ycm_defs = {
762 {true, true}, true, DBG_YSTORM_ID,
763 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
764 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
765 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
766 YCM_REG_DBG_FORCE_FRAME,
767 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
770 static struct block_defs block_pcm_defs = {
772 {true, true}, true, DBG_PSTORM_ID,
773 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
774 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
775 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
776 PCM_REG_DBG_FORCE_FRAME,
777 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
780 static struct block_defs block_qm_defs = {
782 {true, true}, false, 0,
783 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
784 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
785 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
786 QM_REG_DBG_FORCE_FRAME,
787 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
790 static struct block_defs block_tm_defs = {
792 {true, true}, false, 0,
793 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
794 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
795 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
796 TM_REG_DBG_FORCE_FRAME,
797 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
800 static struct block_defs block_dorq_defs = {
802 {true, true}, false, 0,
803 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
804 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
805 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
806 DORQ_REG_DBG_FORCE_FRAME,
807 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
810 static struct block_defs block_brb_defs = {
812 {true, true}, false, 0,
813 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
814 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
815 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
816 BRB_REG_DBG_FORCE_FRAME,
817 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
820 static struct block_defs block_src_defs = {
822 {true, true}, false, 0,
823 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
824 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
825 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
826 SRC_REG_DBG_FORCE_FRAME,
827 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
830 static struct block_defs block_prs_defs = {
832 {true, true}, false, 0,
833 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
834 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
835 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
836 PRS_REG_DBG_FORCE_FRAME,
837 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
840 static struct block_defs block_tsdm_defs = {
842 {true, true}, true, DBG_TSTORM_ID,
843 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
844 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
845 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
846 TSDM_REG_DBG_FORCE_FRAME,
847 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
850 static struct block_defs block_msdm_defs = {
852 {true, true}, true, DBG_MSTORM_ID,
853 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
854 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
855 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
856 MSDM_REG_DBG_FORCE_FRAME,
857 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
860 static struct block_defs block_usdm_defs = {
862 {true, true}, true, DBG_USTORM_ID,
863 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
864 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
865 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
866 USDM_REG_DBG_FORCE_FRAME,
867 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
870 static struct block_defs block_xsdm_defs = {
872 {true, true}, true, DBG_XSTORM_ID,
873 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
874 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
875 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
876 XSDM_REG_DBG_FORCE_FRAME,
877 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
880 static struct block_defs block_ysdm_defs = {
882 {true, true}, true, DBG_YSTORM_ID,
883 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
884 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
885 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
886 YSDM_REG_DBG_FORCE_FRAME,
887 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
890 static struct block_defs block_psdm_defs = {
892 {true, true}, true, DBG_PSTORM_ID,
893 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
894 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
895 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
896 PSDM_REG_DBG_FORCE_FRAME,
897 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
900 static struct block_defs block_tsem_defs = {
902 {true, true}, true, DBG_TSTORM_ID,
903 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
904 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
905 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
906 TSEM_REG_DBG_FORCE_FRAME,
907 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
910 static struct block_defs block_msem_defs = {
912 {true, true}, true, DBG_MSTORM_ID,
913 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
914 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
915 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
916 MSEM_REG_DBG_FORCE_FRAME,
917 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
920 static struct block_defs block_usem_defs = {
922 {true, true}, true, DBG_USTORM_ID,
923 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
924 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
925 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
926 USEM_REG_DBG_FORCE_FRAME,
927 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
930 static struct block_defs block_xsem_defs = {
932 {true, true}, true, DBG_XSTORM_ID,
933 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
934 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
935 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
936 XSEM_REG_DBG_FORCE_FRAME,
937 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
940 static struct block_defs block_ysem_defs = {
942 {true, true}, true, DBG_YSTORM_ID,
943 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
944 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
945 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
946 YSEM_REG_DBG_FORCE_FRAME,
947 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
950 static struct block_defs block_psem_defs = {
952 {true, true}, true, DBG_PSTORM_ID,
953 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
954 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
955 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
956 PSEM_REG_DBG_FORCE_FRAME,
957 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
960 static struct block_defs block_rss_defs = {
962 {true, true}, false, 0,
963 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
964 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
965 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
966 RSS_REG_DBG_FORCE_FRAME,
967 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
970 static struct block_defs block_tmld_defs = {
972 {true, true}, false, 0,
973 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
974 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
975 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
976 TMLD_REG_DBG_FORCE_FRAME,
977 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
980 static struct block_defs block_muld_defs = {
982 {true, true}, false, 0,
983 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
984 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
985 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
986 MULD_REG_DBG_FORCE_FRAME,
987 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
990 static struct block_defs block_yuld_defs = {
992 {true, true}, false, 0,
993 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
994 YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
995 YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
996 YULD_REG_DBG_FORCE_FRAME,
997 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
1000 static struct block_defs block_xyld_defs = {
1002 {true, true}, false, 0,
1003 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1004 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1005 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1006 XYLD_REG_DBG_FORCE_FRAME,
1007 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1010 static struct block_defs block_prm_defs = {
1012 {true, true}, false, 0,
1013 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1014 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1015 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1016 PRM_REG_DBG_FORCE_FRAME,
1017 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1020 static struct block_defs block_pbf_pb1_defs = {
1022 {true, true}, false, 0,
1023 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1024 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1025 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1026 PBF_PB1_REG_DBG_FORCE_FRAME,
1027 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1031 static struct block_defs block_pbf_pb2_defs = {
1033 {true, true}, false, 0,
1034 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1035 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1036 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1037 PBF_PB2_REG_DBG_FORCE_FRAME,
1038 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1042 static struct block_defs block_rpb_defs = {
1044 {true, true}, false, 0,
1045 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1046 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1047 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1048 RPB_REG_DBG_FORCE_FRAME,
1049 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1052 static struct block_defs block_btb_defs = {
1054 {true, true}, false, 0,
1055 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
1056 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1057 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1058 BTB_REG_DBG_FORCE_FRAME,
1059 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1062 static struct block_defs block_pbf_defs = {
1064 {true, true}, false, 0,
1065 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1066 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1067 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1068 PBF_REG_DBG_FORCE_FRAME,
1069 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1072 static struct block_defs block_rdif_defs = {
1074 {true, true}, false, 0,
1075 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1076 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1077 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1078 RDIF_REG_DBG_FORCE_FRAME,
1079 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1082 static struct block_defs block_tdif_defs = {
1084 {true, true}, false, 0,
1085 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1086 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1087 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1088 TDIF_REG_DBG_FORCE_FRAME,
1089 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1092 static struct block_defs block_cdu_defs = {
1094 {true, true}, false, 0,
1095 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1096 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1097 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1098 CDU_REG_DBG_FORCE_FRAME,
1099 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1102 static struct block_defs block_ccfc_defs = {
1104 {true, true}, false, 0,
1105 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1106 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1107 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1108 CCFC_REG_DBG_FORCE_FRAME,
1109 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
/* Per-block debug-bus / reset descriptors (struct block_defs).
 * Apparent per-entry layout, judging from the visible initializers:
 * {per-chip debug-bus exists flags}, associated_to_storm, storm_id,
 * {per-chip debug bus client IDs}, the block's DBG SELECT / DWORD_ENABLE /
 * SHIFT / FORCE_VALID / FORCE_FRAME register addresses, then
 * has_reset_bit, unreset, reset register ID and reset bit offset
 * (MAX_DBG_RESET_REGS / 0 when the block has no debug bus or reset bit).
 * TODO(review): confirm against struct block_defs, declared outside this
 * chunk.
 * NOTE(review): this region is truncated by extraction - the leading
 * "11xx"/"12xx" tokens look like original-file line numbers fused into the
 * text, and several initializers are missing their name string and closing
 * "};". Restore from pristine sources before building.
 */
1112 static struct block_defs block_tcfc_defs = {
1114 {true, true}, false, 0,
1115 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1116 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1117 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1118 TCFC_REG_DBG_FORCE_FRAME,
1119 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1122 static struct block_defs block_igu_defs = {
1124 {true, true}, false, 0,
1125 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1126 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1127 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1128 IGU_REG_DBG_FORCE_FRAME,
1129 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1132 static struct block_defs block_cau_defs = {
1134 {true, true}, false, 0,
1135 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1136 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1137 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1138 CAU_REG_DBG_FORCE_FRAME,
1139 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1142 static struct block_defs block_umac_defs = {
1144 {false, true}, false, 0,
1145 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1146 UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
1147 UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
1148 UMAC_REG_DBG_FORCE_FRAME,
1149 true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1152 static struct block_defs block_xmac_defs = {
1153 "xmac", {false, false}, false, 0,
1154 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1156 false, false, MAX_DBG_RESET_REGS, 0
1159 static struct block_defs block_dbg_defs = {
1160 "dbg", {false, false}, false, 0,
1161 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1163 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1166 static struct block_defs block_nig_defs = {
1168 {true, true}, false, 0,
1169 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1170 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1171 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1172 NIG_REG_DBG_FORCE_FRAME,
1173 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1176 static struct block_defs block_wol_defs = {
1178 {false, true}, false, 0,
1179 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1180 WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
1181 WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
1182 WOL_REG_DBG_FORCE_FRAME,
1183 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1186 static struct block_defs block_bmbn_defs = {
1188 {false, true}, false, 0,
1189 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
1190 BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
1191 BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
1192 BMBN_REG_DBG_FORCE_FRAME,
1193 false, false, MAX_DBG_RESET_REGS, 0
1196 static struct block_defs block_ipc_defs = {
1197 "ipc", {false, false}, false, 0,
1198 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1200 true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1203 static struct block_defs block_nwm_defs = {
1205 {false, true}, false, 0,
1206 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1207 NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
1208 NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
1209 NWM_REG_DBG_FORCE_FRAME,
1210 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1213 static struct block_defs block_nws_defs = {
1215 {false, true}, false, 0,
1216 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1217 NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
1218 NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
1219 NWS_REG_DBG_FORCE_FRAME,
1220 true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1223 static struct block_defs block_ms_defs = {
1225 {false, true}, false, 0,
1226 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1227 MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
1228 MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
1229 MS_REG_DBG_FORCE_FRAME,
1230 true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1233 static struct block_defs block_phy_pcie_defs = {
1235 {false, true}, false, 0,
1236 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1237 PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
1238 PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
1239 PCIE_REG_DBG_COMMON_FORCE_FRAME,
1240 false, false, MAX_DBG_RESET_REGS, 0
1243 static struct block_defs block_led_defs = {
1244 "led", {false, false}, false, 0,
1245 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1247 true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1250 static struct block_defs block_avs_wrap_defs = {
1251 "avs_wrap", {false, false}, false, 0,
1252 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1254 true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1257 static struct block_defs block_rgfs_defs = {
1258 "rgfs", {false, false}, false, 0,
1259 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1261 false, false, MAX_DBG_RESET_REGS, 0
1264 static struct block_defs block_tgfs_defs = {
1265 "tgfs", {false, false}, false, 0,
1266 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1268 false, false, MAX_DBG_RESET_REGS, 0
1271 static struct block_defs block_ptld_defs = {
1272 "ptld", {false, false}, false, 0,
1273 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1275 false, false, MAX_DBG_RESET_REGS, 0
1278 static struct block_defs block_ypld_defs = {
1279 "ypld", {false, false}, false, 0,
1280 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1282 false, false, MAX_DBG_RESET_REGS, 0
1285 static struct block_defs block_misc_aeu_defs = {
1286 "misc_aeu", {false, false}, false, 0,
1287 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1289 false, false, MAX_DBG_RESET_REGS, 0
1292 static struct block_defs block_bar0_map_defs = {
1293 "bar0_map", {false, false}, false, 0,
1294 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1296 false, false, MAX_DBG_RESET_REGS, 0
/* Map from block ID to its block_defs descriptor, indexed by enum block_id.
 * NOTE(review): most array entries are elided in this extraction; the
 * visible entries must stay in enum block_id order.
 */
1299 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1304 &block_pglue_b_defs,
1314 &block_pswhst2_defs,
1354 &block_pbf_pb1_defs,
1355 &block_pbf_pb2_defs,
1376 &block_phy_pcie_defs,
1378 &block_avs_wrap_defs,
1383 &block_misc_aeu_defs,
1384 &block_bar0_map_defs,
/* Per-platform parameters; the initializer body is elided in this
 * extraction.
 */
1387 static struct platform_defs s_platform_defs[] = {
/* Per-parameter definitions for the GRC dump, indexed by enum
 * dbg_grc_params; the trailing comment on each row names the parameter.
 * Apparent row layout: {per-chip default value}, min, max, is_preset,
 * value-when-EXCLUDE_ALL, value-when-CRASH - TODO(review) confirm against
 * struct grc_param_defs, declared outside this chunk.
 */
1394 static struct grc_param_defs s_grc_param_defs[] = {
1395 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
1396 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
1397 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
1398 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
1399 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
1400 {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
1401 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
1402 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
1403 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
1404 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
1405 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
1406 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
1407 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
1408 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
1409 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
1410 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
1411 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
1412 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
1413 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
1414 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
1415 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
1416 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
1417 {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
1418 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
1419 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
1420 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
1421 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
1422 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
1423 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
1424 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
1425 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
1426 {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
1427 {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
1428 MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
1429 {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
1430 MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
1431 {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
1432 {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
1433 {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
1434 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
1435 {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
1436 {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
1437 {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
/* RSS memory descriptors: mem name, type name, address, and (elided here)
 * per-chip sizes/entry widths.
 */
1440 static struct rss_mem_defs s_rss_mem_defs[] = {
1441 { "rss_mem_cid", "rss_cid", 0,
1444 { "rss_mem_key_msb", "rss_key", 1024,
1447 { "rss_mem_key_lsb", "rss_key", 2048,
1450 { "rss_mem_info", "rss_info", 3072,
1453 { "rss_mem_ind", "rss_ind", 4096,
1454 {(128 * 128), (128 * 208)},
/* VFC RAM regions: name, type, base row, row count. */
1458 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1459 {"vfc_ram_tt1", "vfc_ram", 0, 512},
1460 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
1461 {"vfc_ram_stt2", "vfc_ram", 640, 32},
1462 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
/* Big RAM (BRB/BTB/BMB) descriptors: name, mem + RAM memory group IDs, the
 * GRC param gating the dump, and the indirect address/data registers used
 * to read the RAM.
 */
1465 static struct big_ram_defs s_big_ram_defs[] = {
1466 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1467 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1469 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1470 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1472 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1473 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
/* Reset register descriptors, indexed by enum dbg_reset_regs (see the
 * DBG_RESET_REG_* comments): register address, value to OR in when
 * un-resetting, and per-chip existence flags.
 */
1477 static struct reset_reg_defs s_reset_regs_defs[] = {
1478 { MISCS_REG_RESET_PL_UA, 0x0,
1479 {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
1480 { MISCS_REG_RESET_PL_HV, 0x0,
1481 {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
1482 { MISCS_REG_RESET_PL_HV_2, 0x0,
1483 {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
1484 { MISC_REG_RESET_PL_UA, 0x0,
1485 {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
1486 { MISC_REG_RESET_PL_HV, 0x0,
1487 {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
1488 { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
1489 {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1490 { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
1491 {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1492 { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
1493 {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
/* PHY descriptors: PHY name, its base address, and the TBUS address
 * (low/high) and TBUS data (low/high) register offsets used to read the
 * PHY's test bus.
 */
1496 static struct phy_defs s_phy_defs[] = {
1497 {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
1498 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
1499 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
1500 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
1501 {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
1502 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
1503 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
1504 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
1505 {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
1506 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
1507 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
1508 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
1509 {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
1510 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
1511 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
1512 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
1515 /**************************** Private Functions ******************************/
1517 /* Reads and returns a single dword from the specified unaligned buffer */
1518 static u32 qed_read_unaligned_dword(u8 *buf)
1522 memcpy((u8 *)&dword, buf, sizeof(dword));
1526 /* Returns the value of the specified GRC param */
1527 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1528 enum dbg_grc_params grc_param)
1530 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1532 return dev_data->grc.param_val[grc_param];
1535 /* Initializes the GRC parameters */
1536 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1538 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1540 if (!dev_data->grc.params_initialized) {
1541 qed_dbg_grc_set_params_default(p_hwfn);
1542 dev_data->grc.params_initialized = 1;
1546 /* Initializes debug data for the specified device */
1547 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1548 struct qed_ptt *p_ptt)
1550 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1552 if (dev_data->initialized)
1553 return DBG_STATUS_OK;
1555 if (QED_IS_K2(p_hwfn->cdev)) {
1556 dev_data->chip_id = CHIP_K2;
1557 dev_data->mode_enable[MODE_K2] = 1;
1558 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1559 dev_data->chip_id = CHIP_BB_B0;
1560 dev_data->mode_enable[MODE_BB] = 1;
1562 return DBG_STATUS_UNKNOWN_CHIP;
1565 dev_data->platform_id = PLATFORM_ASIC;
1566 dev_data->mode_enable[MODE_ASIC] = 1;
1568 /* Initializes the GRC parameters */
1569 qed_dbg_grc_init_params(p_hwfn);
1571 dev_data->initialized = true;
1572 return DBG_STATUS_OK;
1575 /* Reads the FW info structure for the specified Storm from the chip,
1576 * and writes it to the specified fw_info pointer.
1578 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1579 struct qed_ptt *p_ptt,
1580 u8 storm_id, struct fw_info *fw_info)
1582 /* Read first the address that points to fw_info location.
1583 * The address is located in the last line of the Storm RAM.
1585 u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1586 SEM_FAST_REG_INT_RAM +
1587 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
1588 sizeof(struct fw_info_location);
1589 struct fw_info_location fw_info_location;
1590 u32 *dest = (u32 *)&fw_info_location;
1593 memset(&fw_info_location, 0, sizeof(fw_info_location));
1594 memset(fw_info, 0, sizeof(*fw_info));
1595 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1596 i++, addr += BYTES_IN_DWORD)
1597 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1598 if (fw_info_location.size > 0 && fw_info_location.size <=
1600 /* Read FW version info from Storm RAM */
1601 addr = fw_info_location.grc_addr;
1602 dest = (u32 *)fw_info;
1603 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1604 i++, addr += BYTES_IN_DWORD)
1605 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1609 /* Dumps the specified string to the specified buffer. Returns the dumped size
1610 * in bytes (actual length + 1 for the null character termination).
1612 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1615 strcpy(dump_buf, str);
1616 return (u32)strlen(str) + 1;
1619 /* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
1622 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1624 u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
1626 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1628 if (dump && align_size)
1629 memset(dump_buf, 0, align_size);
1633 /* Writes the specified string param to the specified buffer.
1634 * Returns the dumped size in dwords.
1636 static u32 qed_dump_str_param(u32 *dump_buf,
1638 const char *param_name, const char *param_val)
1640 char *char_buf = (char *)dump_buf;
1643 /* Dump param name */
1644 offset += qed_dump_str(char_buf + offset, dump, param_name);
1646 /* Indicate a string param value */
1648 *(char_buf + offset) = 1;
1651 /* Dump param value */
1652 offset += qed_dump_str(char_buf + offset, dump, param_val);
1654 /* Align buffer to next dword */
1655 offset += qed_dump_align(char_buf + offset, dump, offset);
1656 return BYTES_TO_DWORDS(offset);
1659 /* Writes the specified numeric param to the specified buffer.
1660 * Returns the dumped size in dwords.
1662 static u32 qed_dump_num_param(u32 *dump_buf,
1663 bool dump, const char *param_name, u32 param_val)
1665 char *char_buf = (char *)dump_buf;
1668 /* Dump param name */
1669 offset += qed_dump_str(char_buf + offset, dump, param_name);
1671 /* Indicate a numeric param value */
1673 *(char_buf + offset) = 0;
1676 /* Align buffer to next dword */
1677 offset += qed_dump_align(char_buf + offset, dump, offset);
1679 /* Dump param value (and change offset from bytes to dwords) */
1680 offset = BYTES_TO_DWORDS(offset);
1682 *(dump_buf + offset) = param_val;
1687 /* Reads the FW version and writes it as a param to the specified buffer.
1688 * Returns the dumped size in dwords.
/* NOTE(review): several interior lines are elided in this extraction
 * (offset/found/storm_id/printed_chars declarations, the snprintf format
 * string, the switch case labels and closing braces); only comments were
 * added here - restore the body from pristine sources before building.
 */
1690 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1691 struct qed_ptt *p_ptt,
1692 u32 *dump_buf, bool dump)
1694 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1695 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1696 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1697 struct fw_info fw_info = { {0}, {0} };
/* Skip the (slow) PRAM reads entirely when not dumping or when the user
 * asked to omit the FW version.
 */
1701 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1702 /* Read FW image/version from PRAM in a non-reset SEMI */
1706 for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1708 /* Read FW version/image */
1709 if (!dev_data->block_in_reset
1710 [s_storm_defs[storm_id].block_id]) {
1711 /* read FW info for the current Storm */
1712 qed_read_fw_info(p_hwfn,
1713 p_ptt, storm_id, &fw_info);
1715 /* Create FW version/image strings */
1717 snprintf(fw_ver_str,
1720 fw_info.ver.num.major,
1721 fw_info.ver.num.minor,
1722 fw_info.ver.num.rev,
1723 fw_info.ver.num.eng);
1724 if (printed_chars < 0 || printed_chars >=
1727 "Unexpected debug error: invalid FW version string\n");
1728 switch (fw_info.ver.image_id) {
1730 strcpy(fw_img_str, "main");
1733 strcpy(fw_img_str, "unknown");
/* Emit the three params; fw_ver_str/fw_img_str keep their "empty"
 * defaults when no usable Storm was found.
 */
1742 /* Dump FW version, image and timestamp */
1743 offset += qed_dump_str_param(dump_buf + offset,
1744 dump, "fw-version", fw_ver_str);
1745 offset += qed_dump_str_param(dump_buf + offset,
1746 dump, "fw-image", fw_img_str);
1747 offset += qed_dump_num_param(dump_buf + offset,
1749 "fw-timestamp", fw_info.ver.timestamp);
1753 /* Reads the MFW version and writes it as a param to the specified buffer.
1754 * Returns the dumped size in dwords.
/* NOTE(review): interior lines are elided in this extraction (function
 * opening brace, printed_chars declaration, the MCP_REG_SCRATCH OR term,
 * the "global" member in the offsetof, the snprintf format string and the
 * DP_NOTICE call); only comments were added here.
 */
1756 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1757 struct qed_ptt *p_ptt,
1758 u32 *dump_buf, bool dump)
1760 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1762 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1763 u32 global_section_offsize, global_section_addr, mfw_ver;
1764 u32 public_data_addr, global_section_offsize_addr;
1767 /* Find MCP public data GRC address.
1768 * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
1770 public_data_addr = qed_rd(p_hwfn, p_ptt,
1771 MISC_REG_SHARED_MEM_ADDR) |
1774 /* Find MCP public global section offset */
1775 global_section_offsize_addr = public_data_addr +
1776 offsetof(struct mcp_public_data,
1778 sizeof(offsize_t) * PUBLIC_GLOBAL;
1779 global_section_offsize = qed_rd(p_hwfn, p_ptt,
1780 global_section_offsize_addr);
/* offsize values are in 4-byte units, hence the * 4 */
1781 global_section_addr = MCP_REG_SCRATCH +
1782 (global_section_offsize &
1783 OFFSIZE_OFFSET_MASK) * 4;
1785 /* Read MFW version from MCP public global section */
1786 mfw_ver = qed_rd(p_hwfn, p_ptt,
1787 global_section_addr +
1788 offsetof(struct public_global, mfw_ver));
/* mfw_ver packs four version bytes, most significant first */
1790 /* Dump MFW version param */
1791 printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
1793 (u8) (mfw_ver >> 24),
1794 (u8) (mfw_ver >> 16),
1795 (u8) (mfw_ver >> 8),
1797 if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
1799 "Unexpected debug error: invalid MFW version string\n");
1802 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1805 /* Writes a section header to the specified buffer.
1806 * Returns the dumped size in dwords.
1808 static u32 qed_dump_section_hdr(u32 *dump_buf,
1809 bool dump, const char *name, u32 num_params)
1811 return qed_dump_num_param(dump_buf, dump, name, num_params);
1814 /* Writes the common global params to the specified buffer.
1815 * Returns the dumped size in dwords.
/* NOTE(review): interior lines are elided in this extraction (dump_buf/dump
 * parameters, offset declaration, the "chip"/"platform" param-name
 * arguments, the platform name member, the pci-func value and the final
 * return); only comments were added here.
 */
1817 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1818 struct qed_ptt *p_ptt,
1821 u8 num_specific_global_params)
1823 u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
1824 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1827 /* Find platform string and dump global params section header */
1828 offset += qed_dump_section_hdr(dump_buf + offset,
1829 dump, "global_params", num_params);
/* Common params: fw-version, mfw-version, tools-version, chip name,
 * platform name and pci-func.
 */
1832 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1833 offset += qed_dump_mfw_ver_param(p_hwfn,
1834 p_ptt, dump_buf + offset, dump);
1835 offset += qed_dump_num_param(dump_buf + offset,
1836 dump, "tools-version", TOOLS_VERSION);
1837 offset += qed_dump_str_param(dump_buf + offset,
1840 s_chip_defs[dev_data->chip_id].name);
1841 offset += qed_dump_str_param(dump_buf + offset,
1844 s_platform_defs[dev_data->platform_id].
1847 qed_dump_num_param(dump_buf + offset, dump, "pci-func",
1852 /* Writes the last section to the specified buffer at the given offset.
1853 * Returns the dumped size in dwords.
1855 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1857 u32 start_offset = offset, crc = ~0;
1859 /* Dump CRC section header */
1860 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1862 /* Calculate CRC32 and add it to the dword following the "last" section.
1865 *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
1866 DWORDS_TO_BYTES(offset));
1868 return offset - start_offset;
1871 /* Update blocks reset state */
1872 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
1873 struct qed_ptt *p_ptt)
1875 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1876 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
1879 /* Read reset registers */
1880 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
1881 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
1882 reg_val[i] = qed_rd(p_hwfn,
1883 p_ptt, s_reset_regs_defs[i].addr);
1885 /* Check if blocks are in reset */
1886 for (i = 0; i < MAX_BLOCK_ID; i++)
1887 dev_data->block_in_reset[i] =
1888 s_block_defs[i]->has_reset_bit &&
1889 !(reg_val[s_block_defs[i]->reset_reg] &
1890 BIT(s_block_defs[i]->reset_bit_offset));
1893 /* Enable / disable the Debug block */
1894 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1895 struct qed_ptt *p_ptt, bool enable)
1897 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1900 /* Resets the Debug block */
1901 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1902 struct qed_ptt *p_ptt)
1904 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1906 dbg_reset_reg_addr =
1907 s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
1908 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
1909 new_reset_reg_val = old_reset_reg_val &
1910 ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
1912 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
1913 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
1916 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
1917 struct qed_ptt *p_ptt,
1918 enum dbg_bus_frame_modes mode)
1920 qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
1923 /* Enable / disable Debug Bus clients according to the specified mask.
1924 * (1 = enable, 0 = disable)
1926 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
1927 struct qed_ptt *p_ptt, u32 client_mask)
1929 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
1932 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
1934 const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1935 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1936 u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
1940 case INIT_MODE_OP_NOT:
1941 return !qed_is_mode_match(p_hwfn, modes_buf_offset);
1942 case INIT_MODE_OP_OR:
1943 case INIT_MODE_OP_AND:
1944 arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
1945 arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
1946 return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1947 arg2) : (arg1 && arg2);
1949 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1953 /* Returns true if the specified entity (indicated by GRC param) should be
1954 * included in the dump, false otherwise.
1956 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1957 enum dbg_grc_params grc_param)
1959 return qed_grc_get_param(p_hwfn, grc_param) > 0;
1962 /* Returns true of the specified Storm should be included in the dump, false
1965 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1966 enum dbg_storms storm)
1968 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1971 /* Returns true if the specified memory should be included in the dump, false
1974 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
1975 enum block_id block_id, u8 mem_group_id)
1979 /* Check Storm match */
1980 if (s_block_defs[block_id]->associated_to_storm &&
1981 !qed_grc_is_storm_included(p_hwfn,
1982 (enum dbg_storms)s_block_defs[block_id]->storm_id))
1985 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
1986 if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
1987 mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
1988 return qed_grc_is_included(p_hwfn,
1989 s_big_ram_defs[i].grc_param);
1990 if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
1992 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
1993 if (mem_group_id == MEM_GROUP_RAM)
1994 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
1995 if (mem_group_id == MEM_GROUP_PBUF)
1996 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
1997 if (mem_group_id == MEM_GROUP_CAU_MEM ||
1998 mem_group_id == MEM_GROUP_CAU_SB ||
1999 mem_group_id == MEM_GROUP_CAU_PI)
2000 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2001 if (mem_group_id == MEM_GROUP_QM_MEM)
2002 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2003 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
2004 mem_group_id == MEM_GROUP_TASK_CFC_MEM)
2005 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
2006 if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
2008 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2009 if (mem_group_id == MEM_GROUP_MULD_MEM)
2010 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2011 if (mem_group_id == MEM_GROUP_PRS_MEM)
2012 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2013 if (mem_group_id == MEM_GROUP_DMAE_MEM)
2014 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2015 if (mem_group_id == MEM_GROUP_TM_MEM)
2016 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2017 if (mem_group_id == MEM_GROUP_SDM_MEM)
2018 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2019 if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
2021 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2022 if (mem_group_id == MEM_GROUP_CM_MEM)
2023 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2024 if (mem_group_id == MEM_GROUP_IOR)
2025 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2030 /* Stalls all Storms */
2031 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2032 struct qed_ptt *p_ptt, bool stall)
2034 u8 reg_val = stall ? 1 : 0;
2037 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2038 if (qed_grc_is_storm_included(p_hwfn,
2039 (enum dbg_storms)storm_id)) {
2041 s_storm_defs[storm_id].sem_fast_mem_addr +
2042 SEM_FAST_REG_STALL_0;
2044 qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
2048 msleep(STALL_DELAY_MS);
2051 /* Takes all blocks out of reset */
2052 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2053 struct qed_ptt *p_ptt)
2055 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2056 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2059 /* Fill reset regs values */
2060 for (i = 0; i < MAX_BLOCK_ID; i++)
2061 if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
2062 reg_val[s_block_defs[i]->reset_reg] |=
2063 BIT(s_block_defs[i]->reset_bit_offset);
2065 /* Write reset registers */
2066 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2067 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
2068 reg_val[i] |= s_reset_regs_defs[i].unreset_val;
2072 s_reset_regs_defs[i].addr +
2073 RESET_REG_UNRESET_OFFSET, reg_val[i]);
2078 /* Returns the attention block data of the specified block */
2079 static const struct dbg_attn_block_type_data *
2080 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2082 const struct dbg_attn_block *base_attn_block_arr =
2083 (const struct dbg_attn_block *)
2084 s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2086 return &base_attn_block_arr[block_id].per_type_data[attn_type];
2089 /* Returns the attention registers of the specified block */
2090 static const struct dbg_attn_reg *
2091 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2094 const struct dbg_attn_block_type_data *block_type_data =
2095 qed_get_block_attn_data(block_id, attn_type);
2097 *num_attn_regs = block_type_data->num_regs;
2098 return &((const struct dbg_attn_reg *)
2099 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2103 /* For each block, clear the status of all parities */
/* NOTE(review): interior lines are elided in this extraction (block_id
 * declaration, the "continue" for blocks in reset, the num_attn_regs
 * argument, the mode-match condition, the sts_clr_address member and
 * closing braces); only comments were added here.
 */
2104 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2105 struct qed_ptt *p_ptt)
2107 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2108 u8 reg_idx, num_attn_regs;
2111 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2112 const struct dbg_attn_reg *attn_reg_arr;
/* Blocks in reset cannot be accessed - skip them */
2114 if (dev_data->block_in_reset[block_id])
2117 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2120 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2121 const struct dbg_attn_reg *reg_data =
2122 &attn_reg_arr[reg_idx];
/* Each attention register may be mode-gated; only touch it when
 * its mode expression matches the enabled modes.
 */
2125 bool eval_mode = GET_FIELD(reg_data->mode.data,
2126 DBG_MODE_HDR_EVAL_MODE) > 0;
2127 u16 modes_buf_offset =
2128 GET_FIELD(reg_data->mode.data,
2129 DBG_MODE_HDR_MODES_BUF_OFFSET);
2132 qed_is_mode_match(p_hwfn, &modes_buf_offset))
2133 /* Mode match - read parity status read-clear
2136 qed_rd(p_hwfn, p_ptt,
2137 DWORDS_TO_BYTES(reg_data->
2143 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2144 * The following parameters are dumped:
2145 * - 'count' = num_dumped_entries
2146 * - 'split' = split_type
2147 * - 'id' = split_id (dumped only if split_id >= 0)
2148 * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
2149 * param_val != NULL)
2151 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2153 u32 num_reg_entries,
2154 const char *split_type,
2156 const char *param_name, const char *param_val)
2158 u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2161 offset += qed_dump_section_hdr(dump_buf + offset,
2162 dump, "grc_regs", num_params);
2163 offset += qed_dump_num_param(dump_buf + offset,
2164 dump, "count", num_reg_entries);
2165 offset += qed_dump_str_param(dump_buf + offset,
2166 dump, "split", split_type);
2168 offset += qed_dump_num_param(dump_buf + offset,
2169 dump, "id", split_id);
2170 if (param_name && param_val)
2171 offset += qed_dump_str_param(dump_buf + offset,
2172 dump, param_name, param_val);
2176 /* Dumps the GRC registers in the specified address range.
2177 * Returns the dumped size in dwords.
2179 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2180 struct qed_ptt *p_ptt, u32 *dump_buf,
2181 bool dump, u32 addr, u32 len)
2183 u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2186 for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2187 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
2193 /* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
2194 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
2198 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2202 /* Dumps GRC registers sequence. Returns the dumped size in dwords. */
2203 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2204 struct qed_ptt *p_ptt, u32 *dump_buf,
2205 bool dump, u32 addr, u32 len)
2209 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2210 offset += qed_grc_dump_addr_range(p_hwfn,
2212 dump_buf + offset, dump, addr, len);
2216 /* Dumps GRC registers sequence with skip cycle.
2217 * Returns the dumped size in dwords.
/* NOTE(review): interior lines are elided in this extraction (function
 * opening brace, the min_t() middle arguments, the qed_grc_dump_addr_range
 * argument lines, the dump/!dump branches and closing braces); only
 * comments were added here. The visible structure alternates read_len
 * dwords of real reads with skip_len dwords of zero padding until
 * total_len is covered.
 */
2219 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2220 struct qed_ptt *p_ptt, u32 *dump_buf,
2221 bool dump, u32 addr, u32 total_len,
2222 u32 read_len, u32 skip_len)
2224 u32 offset = 0, reg_offset = 0;
2226 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2228 while (reg_offset < total_len) {
2229 u32 curr_len = min_t(u32,
2231 total_len - reg_offset);
2232 offset += qed_grc_dump_addr_range(p_hwfn,
2235 dump, addr, curr_len);
2236 reg_offset += curr_len;
2238 if (reg_offset < total_len) {
2239 curr_len = min_t(u32,
2241 total_len - skip_len);
/* Skipped cycles are dumped as zeros, not read from HW */
2242 memset(dump_buf + offset, 0,
2243 DWORDS_TO_BYTES(curr_len));
2245 reg_offset += curr_len;
2250 offset += total_len;
2256 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
/* Walks the condition-header/register-entry records in @input_regs_arr,
 * dumping each register range whose mode expression matches and whose
 * block is enabled, and counts the dumped entries in
 * *num_dumped_reg_entries.
 * NOTE(review): interior lines are elided in this extraction (dump_buf/
 * dump parameters, the mode_match reset per header, the full
 * qed_grc_dump_reg_entry argument list and closing braces); only comments
 * were added here.
 */
2257 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2258 struct qed_ptt *p_ptt,
2259 struct dbg_array input_regs_arr,
2262 bool block_enable[MAX_BLOCK_ID],
2263 u32 *num_dumped_reg_entries)
2265 u32 i, offset = 0, input_offset = 0;
2266 bool mode_match = true;
2268 *num_dumped_reg_entries = 0;
2269 while (input_offset < input_regs_arr.size_in_dwords) {
2270 const struct dbg_dump_cond_hdr *cond_hdr =
2271 (const struct dbg_dump_cond_hdr *)
2272 &input_regs_arr.ptr[input_offset++];
2273 bool eval_mode = GET_FIELD(cond_hdr->mode.data,
2274 DBG_MODE_HDR_EVAL_MODE) > 0;
2276 /* Check mode/block */
2278 u16 modes_buf_offset =
2279 GET_FIELD(cond_hdr->mode.data,
2280 DBG_MODE_HDR_MODES_BUF_OFFSET);
2281 mode_match = qed_is_mode_match(p_hwfn,
2285 if (mode_match && block_enable[cond_hdr->block_id]) {
2286 for (i = 0; i < cond_hdr->data_size;
2287 i++, input_offset++) {
2288 const struct dbg_dump_reg *reg =
2289 (const struct dbg_dump_reg *)
2290 &input_regs_arr.ptr[input_offset];
2293 addr = GET_FIELD(reg->data,
2294 DBG_DUMP_REG_ADDRESS);
2295 len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2297 qed_grc_dump_reg_entry(p_hwfn, p_ptt,
2302 (*num_dumped_reg_entries)++;
/* Non-matching headers: skip the whole data payload */
2305 input_offset += cond_hdr->data_size;
2312 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2313 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2314 struct qed_ptt *p_ptt,
2315 struct dbg_array input_regs_arr,
2318 bool block_enable[MAX_BLOCK_ID],
2319 const char *split_type_name,
2321 const char *param_name,
2322 const char *param_val)
2324 u32 num_dumped_reg_entries, offset;
2326 /* Calculate register dump header size (and skip it for now) */
2327 offset = qed_grc_dump_regs_hdr(dump_buf,
2331 split_id, param_name, param_val);
2333 /* Dump registers */
2334 offset += qed_grc_dump_regs_entries(p_hwfn,
2340 &num_dumped_reg_entries);
2342 /* Write register dump header, now that the final entry count is known */
2343 if (dump && num_dumped_reg_entries > 0)
2344 qed_grc_dump_regs_hdr(dump_buf,
2346 num_dumped_reg_entries,
2348 split_id, param_name, param_val);
/* If nothing was dumped, the reserved header space is dropped as well */
2350 return num_dumped_reg_entries > 0 ? offset : 0;
2353 /* Dumps registers according to the input registers array.
2354 * Returns the dumped size in dwords.
2356 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2357 struct qed_ptt *p_ptt,
2360 bool block_enable[MAX_BLOCK_ID],
2361 const char *param_name, const char *param_val)
2363 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2364 struct chip_platform_defs *p_platform_defs;
2365 u32 offset = 0, input_offset = 0;
2366 struct chip_defs *p_chip_defs;
2367 u8 port_id, pf_id, vf_id;
2370 p_chip_defs = &s_chip_defs[dev_data->chip_id];
2371 p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
2374 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
2375 while (input_offset <
2376 s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2377 const struct dbg_dump_split_hdr *split_hdr =
2378 (const struct dbg_dump_split_hdr *)
2379 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2380 u8 split_type_id = GET_FIELD(split_hdr->hdr,
2381 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2382 u32 split_data_size = GET_FIELD(split_hdr->hdr,
2383 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2384 struct dbg_array curr_input_regs_arr = {
2385 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
/* Port/PF/VF splits pretend to the relevant entity before each dump */
2388 switch (split_type_id) {
2389 case SPLIT_TYPE_NONE:
2390 offset += qed_grc_dump_split_data(p_hwfn,
2392 curr_input_regs_arr,
2401 case SPLIT_TYPE_PORT:
2402 for (port_id = 0; port_id < p_platform_defs->num_ports;
2405 qed_port_pretend(p_hwfn, p_ptt,
2408 qed_grc_dump_split_data(p_hwfn, p_ptt,
2409 curr_input_regs_arr,
2418 case SPLIT_TYPE_PORT_PF:
2419 for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
2422 PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2425 fid = pf_id << pfid_shift;
2426 qed_fid_pretend(p_hwfn, p_ptt, fid);
2430 qed_grc_dump_split_data(p_hwfn, p_ptt,
2431 curr_input_regs_arr,
2440 for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
2443 PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2445 PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
/* VF FIDs require the vfvalid bit in addition to the VF ID */
2448 fid = BIT(vfvalid_shift) |
2449 (vf_id << vfid_shift);
2450 qed_fid_pretend(p_hwfn, p_ptt, fid);
2454 qed_grc_dump_split_data(p_hwfn, p_ptt,
2455 curr_input_regs_arr,
2467 input_offset += split_data_size;
2470 /* Pretend to original PF (undo any port/PF/VF pretend above) */
2472 fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2473 qed_fid_pretend(p_hwfn, p_ptt, fid);
2479 /* Dump reset registers. Returns the dumped size in dwords. */
2480 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2481 struct qed_ptt *p_ptt,
2482 u32 *dump_buf, bool dump)
2484 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2485 u32 i, offset = 0, num_regs = 0;
2487 /* Calculate header size (written as placeholder, rewritten below) */
2488 offset += qed_grc_dump_regs_hdr(dump_buf,
2489 false, 0, "eng", -1, NULL, NULL);
2491 /* Write reset registers - only those that exist on this chip */
2492 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2493 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
2494 u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
2496 offset += qed_grc_dump_reg_entry(p_hwfn,
/* Rewrite the header with the actual register count */
2508 qed_grc_dump_regs_hdr(dump_buf,
2509 true, num_regs, "eng", -1, NULL, NULL);
2513 /* Dump registers that are modified during GRC Dump and therefore must be dumped
2514 * first. Returns the dumped size in dwords.
2516 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2517 struct qed_ptt *p_ptt,
2518 u32 *dump_buf, bool dump)
2520 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2521 u32 offset = 0, num_reg_entries = 0, block_id;
2522 u8 storm_id, reg_idx, num_attn_regs;
2524 /* Calculate header size (placeholder, rewritten at the end) */
2525 offset += qed_grc_dump_regs_hdr(dump_buf,
2526 false, 0, "eng", -1, NULL, NULL);
2528 /* Write parity registers */
2529 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2530 const struct dbg_attn_reg *attn_reg_arr;
2532 if (dev_data->block_in_reset[block_id] && dump)
2535 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2538 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2539 const struct dbg_attn_reg *reg_data =
2540 &attn_reg_arr[reg_idx];
2541 u16 modes_buf_offset;
2546 eval_mode = GET_FIELD(reg_data->mode.data,
2547 DBG_MODE_HDR_EVAL_MODE) > 0;
2549 GET_FIELD(reg_data->mode.data,
2550 DBG_MODE_HDR_MODES_BUF_OFFSET);
2552 qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
2553 /* Mode match - read and dump registers
 * (mask register followed by status register)
 */
2554 addr = reg_data->mask_address;
2556 qed_grc_dump_reg_entry(p_hwfn,
2562 addr = GET_FIELD(reg_data->data,
2563 DBG_ATTN_REG_STS_ADDRESS);
2565 qed_grc_dump_reg_entry(p_hwfn,
2571 num_reg_entries += 2;
2576 /* Write storm stall status registers */
2577 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2580 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
2585 BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2586 SEM_FAST_REG_STALLED);
2587 offset += qed_grc_dump_reg_entry(p_hwfn,
/* Rewrite the header with the final number of dumped entries */
2598 qed_grc_dump_regs_hdr(dump_buf,
2600 num_reg_entries, "eng", -1, NULL, NULL);
2604 /* Dumps registers that can't be represented in the debug arrays */
2605 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2606 struct qed_ptt *p_ptt,
2607 u32 *dump_buf, bool dump)
2609 u32 offset = 0, addr;
/* Header announces exactly two entries: RDIF and TDIF error info */
2611 offset += qed_grc_dump_regs_hdr(dump_buf,
2612 dump, 2, "eng", -1, NULL, NULL);
2614 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2617 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2618 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2623 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2626 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2628 qed_grc_dump_reg_entry_skip(p_hwfn,
2633 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2640 /* Dumps a GRC memory header (section and params).
2641 * The following parameters are dumped:
2642 * name - name is dumped only if it's not NULL.
2643 * addr - addr is dumped only if name is NULL.
2644 * len - len is always dumped.
2645 * width - bit_width is dumped if it's not zero.
2646 * packed - packed=1 is dumped if it's not false.
2647 * mem_group - mem_group is always dumped.
2648 * is_storm - true only if the memory is related to a Storm.
2649 * storm_letter - storm letter (valid only if is_storm is true).
2650 * Returns the dumped size in dwords.
2652 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2660 const char *mem_group,
2661 bool is_storm, char storm_letter)
2669 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2675 /* Dump section header */
2676 offset += qed_dump_section_hdr(dump_buf + offset,
2677 dump, "grc_mem", num_params);
/* The '?' placeholder in "?STORM_" is overwritten with the storm letter */
2681 strcpy(buf, "?STORM_");
2682 buf[0] = storm_letter;
2683 strcpy(buf + strlen(buf), name);
2688 offset += qed_dump_str_param(dump_buf + offset,
2693 "Dumping %d registers from %s...\n",
/* Nameless memories are identified by their byte address instead */
2697 offset += qed_dump_num_param(dump_buf + offset,
2699 DWORDS_TO_BYTES(addr));
2700 if (dump && len > 64)
2703 "Dumping %d registers from address 0x%x...\n",
2704 len, (u32)DWORDS_TO_BYTES(addr));
2708 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2710 /* Dump bit width */
2712 offset += qed_dump_num_param(dump_buf + offset,
2713 dump, "width", bit_width);
2717 offset += qed_dump_num_param(dump_buf + offset,
/* Storm memories get the storm-letter prefix on the type name too */
2722 strcpy(buf, "?STORM_");
2723 buf[0] = storm_letter;
2724 strcpy(buf + strlen(buf), mem_group);
2726 strcpy(buf, mem_group);
2729 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2733 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2734 * Returns the dumped size in dwords.
2736 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2737 struct qed_ptt *p_ptt,
2745 const char *mem_group,
2746 bool is_storm, char storm_letter)
/* Header first, then the raw memory contents read from addr */
2750 offset += qed_grc_dump_mem_hdr(p_hwfn,
2758 mem_group, is_storm, storm_letter);
2759 offset += qed_grc_dump_addr_range(p_hwfn,
2761 dump_buf + offset, dump, addr, len);
2765 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
2766 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2767 struct qed_ptt *p_ptt,
2768 struct dbg_array input_mems_arr,
2769 u32 *dump_buf, bool dump)
2771 u32 i, offset = 0, input_offset = 0;
2772 bool mode_match = true;
2774 while (input_offset < input_mems_arr.size_in_dwords) {
2775 const struct dbg_dump_cond_hdr *cond_hdr;
2779 cond_hdr = (const struct dbg_dump_cond_hdr *)
2780 &input_mems_arr.ptr[input_offset++];
2781 eval_mode = GET_FIELD(cond_hdr->mode.data,
2782 DBG_MODE_HDR_EVAL_MODE) > 0;
2784 /* Check required mode */
2786 u16 modes_buf_offset =
2787 GET_FIELD(cond_hdr->mode.data,
2788 DBG_MODE_HDR_MODES_BUF_OFFSET);
2790 mode_match = qed_is_mode_match(p_hwfn,
2795 input_offset += cond_hdr->data_size;
/* All entries under this header share its block/mode condition */
2799 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
2800 for (i = 0; i < num_entries;
2801 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
2802 const struct dbg_dump_mem *mem =
2803 (const struct dbg_dump_mem *)
2804 &input_mems_arr.ptr[input_offset];
2807 mem_group_id = GET_FIELD(mem->dword0,
2808 DBG_DUMP_MEM_MEM_GROUP_ID);
2809 if (mem_group_id >= MEM_GROUPS_NUM) {
2810 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
2814 if (qed_grc_is_mem_included(p_hwfn,
2815 (enum block_id)cond_hdr->block_id,
2817 u32 mem_addr = GET_FIELD(mem->dword0,
2818 DBG_DUMP_MEM_ADDRESS);
2819 u32 mem_len = GET_FIELD(mem->dword1,
2820 DBG_DUMP_MEM_LENGTH);
2821 enum dbg_grc_params grc_param;
2822 char storm_letter = 'a';
2823 bool is_storm = false;
2825 /* Update memory length for CCFC/TCFC memories
2826 * according to number of LCIDs/LTIDs.
2828 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
2829 if (mem_len % MAX_LCIDS != 0) {
2831 "Invalid CCFC connection memory size\n");
/* Scale length: per-LCID size times the configured LCID count */
2835 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
2836 mem_len = qed_grc_get_param(p_hwfn,
2838 (mem_len / MAX_LCIDS);
2839 } else if (mem_group_id ==
2840 MEM_GROUP_TASK_CFC_MEM) {
2841 if (mem_len % MAX_LTIDS != 0) {
2843 "Invalid TCFC task memory size\n");
2847 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
2848 mem_len = qed_grc_get_param(p_hwfn,
2850 (mem_len / MAX_LTIDS);
2853 /* If memory is associated with Storm, update
2856 if (s_block_defs[cond_hdr->block_id]->
2857 associated_to_storm) {
2860 s_storm_defs[s_block_defs[
2861 cond_hdr->block_id]->
2866 offset += qed_grc_dump_mem(p_hwfn, p_ptt,
2867 dump_buf + offset, dump, NULL,
2868 mem_addr, mem_len, 0,
2870 s_mem_group_names[mem_group_id],
2871 is_storm, storm_letter);
2879 /* Dumps GRC memories according to the input array dump_mem.
2880 * Returns the dumped size in dwords.
2882 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
2883 struct qed_ptt *p_ptt,
2884 u32 *dump_buf, bool dump)
2886 u32 offset = 0, input_offset = 0;
2888 while (input_offset <
2889 s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
2890 const struct dbg_dump_split_hdr *split_hdr =
2891 (const struct dbg_dump_split_hdr *)
2892 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
2893 u8 split_type_id = GET_FIELD(split_hdr->hdr,
2894 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2895 u32 split_data_size = GET_FIELD(split_hdr->hdr,
2896 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2897 struct dbg_array curr_input_mems_arr = {
2898 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
/* Only the non-split case is handled; other split types just log */
2901 switch (split_type_id) {
2902 case SPLIT_TYPE_NONE:
2903 offset += qed_grc_dump_mem_entries(p_hwfn,
2905 curr_input_mems_arr,
2911 "Dumping split memories is currently not supported\n");
2915 input_offset += split_data_size;
2921 /* Dumps GRC context data for the specified Storm.
2922 * Returns the dumped size in dwords.
2924 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2925 struct qed_ptt *p_ptt,
2934 u32 i, lid, total_size;
/* Scale the per-LID size before computing the total dump size */
2939 lid_size *= BYTES_IN_DWORD;
2940 total_size = num_lids * lid_size;
2941 offset += qed_grc_dump_mem_hdr(p_hwfn,
2950 true, s_storm_defs[storm_id].letter);
2952 /* Dump context data - one read per dword of each LID */
2954 for (lid = 0; lid < num_lids; lid++) {
2955 for (i = 0; i < lid_size; i++, offset++) {
2958 s_storm_defs[storm_id].cm_ctx_wr_addr,
2960 *(dump_buf + offset) = qed_rd(p_hwfn,
/* NOTE(review): size-only accounting path (no HW reads) -
 * the guarding condition is not visible in this fragment; confirm.
 */
2966 offset += total_size;
2972 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2973 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2974 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
/* Four context regions are dumped per storm selected by the GRC params:
 * Conn AG, Conn ST, Task AG and Task ST.
 */
2979 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2980 if (!qed_grc_is_storm_included(p_hwfn,
2981 (enum dbg_storms)storm_id))
2984 /* Dump Conn AG context size */
2986 qed_grc_dump_ctx_data(p_hwfn,
2991 qed_grc_get_param(p_hwfn,
2992 DBG_GRC_PARAM_NUM_LCIDS),
2993 s_storm_defs[storm_id].
2994 cm_conn_ag_ctx_lid_size,
2995 s_storm_defs[storm_id].
2996 cm_conn_ag_ctx_rd_addr,
2999 /* Dump Conn ST context size */
3001 qed_grc_dump_ctx_data(p_hwfn,
3006 qed_grc_get_param(p_hwfn,
3007 DBG_GRC_PARAM_NUM_LCIDS),
3008 s_storm_defs[storm_id].
3009 cm_conn_st_ctx_lid_size,
3010 s_storm_defs[storm_id].
3011 cm_conn_st_ctx_rd_addr,
3014 /* Dump Task AG context size */
3016 qed_grc_dump_ctx_data(p_hwfn,
3021 qed_grc_get_param(p_hwfn,
3022 DBG_GRC_PARAM_NUM_LTIDS),
3023 s_storm_defs[storm_id].
3024 cm_task_ag_ctx_lid_size,
3025 s_storm_defs[storm_id].
3026 cm_task_ag_ctx_rd_addr,
3029 /* Dump Task ST context size */
3031 qed_grc_dump_ctx_data(p_hwfn,
3036 qed_grc_get_param(p_hwfn,
3037 DBG_GRC_PARAM_NUM_LTIDS),
3038 s_storm_defs[storm_id].
3039 cm_task_st_ctx_lid_size,
3040 s_storm_defs[storm_id].
3041 cm_task_st_ctx_rd_addr,
3048 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3049 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3050 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3052 char buf[10] = "IOR_SET_?";
3053 u8 storm_id, set_id;
3056 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3057 struct storm_defs *storm = &s_storm_defs[storm_id];
3059 if (!qed_grc_is_storm_included(p_hwfn,
3060 (enum dbg_storms)storm_id))
3063 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3066 dwords = storm->sem_fast_mem_addr +
3067 SEM_FAST_REG_STORM_REG_FILE;
3068 addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
/* Replace the trailing '?' with the set digit
 * (assumes NUM_IOR_SETS <= 10 - TODO confirm)
 */
3069 buf[strlen(buf) - 1] = '0' + set_id;
3070 offset += qed_grc_dump_mem(p_hwfn,
3088 /* Dump VFC CAM. Returns the dumped size in dwords. */
3089 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3090 struct qed_ptt *p_ptt,
3091 u32 *dump_buf, bool dump, u8 storm_id)
3093 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3094 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3095 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3099 offset += qed_grc_dump_mem_hdr(p_hwfn,
3108 true, s_storm_defs[storm_id].letter);
3110 /* Prepare CAM address */
3111 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
/* Each row: write command, write address, then read the response dwords */
3112 for (row = 0; row < VFC_CAM_NUM_ROWS;
3113 row++, offset += VFC_CAM_RESP_DWORDS) {
3114 /* Write VFC CAM command */
3115 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3118 s_storm_defs[storm_id].sem_fast_mem_addr +
3119 SEM_FAST_REG_VFC_DATA_WR,
3120 cam_cmd, VFC_CAM_CMD_DWORDS);
3122 /* Write VFC CAM address */
3125 s_storm_defs[storm_id].sem_fast_mem_addr +
3126 SEM_FAST_REG_VFC_ADDR,
3127 cam_addr, VFC_CAM_ADDR_DWORDS);
3129 /* Read VFC CAM read response */
3132 s_storm_defs[storm_id].sem_fast_mem_addr +
3133 SEM_FAST_REG_VFC_DATA_RD,
3134 dump_buf + offset, VFC_CAM_RESP_DWORDS);
/* NOTE(review): size-only accounting path - guard not visible here */
3137 offset += total_size;
3143 /* Dump VFC RAM. Returns the dumped size in dwords. */
3144 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3145 struct qed_ptt *p_ptt,
3148 u8 storm_id, struct vfc_ram_defs *ram_defs)
3150 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3151 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3152 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3156 offset += qed_grc_dump_mem_hdr(p_hwfn,
3164 ram_defs->type_name,
3165 true, s_storm_defs[storm_id].letter);
3167 /* Prepare RAM address */
3168 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
/* NOTE(review): early size-only return (no HW reads) -
 * guard condition elided in this fragment; confirm it is the !dump path.
 */
3171 return offset + total_size;
3173 for (row = ram_defs->base_row;
3174 row < ram_defs->base_row + ram_defs->num_rows;
3175 row++, offset += VFC_RAM_RESP_DWORDS) {
3176 /* Write VFC RAM command */
3179 s_storm_defs[storm_id].sem_fast_mem_addr +
3180 SEM_FAST_REG_VFC_DATA_WR,
3181 ram_cmd, VFC_RAM_CMD_DWORDS);
3183 /* Write VFC RAM address */
3184 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3187 s_storm_defs[storm_id].sem_fast_mem_addr +
3188 SEM_FAST_REG_VFC_ADDR,
3189 ram_addr, VFC_RAM_ADDR_DWORDS);
3191 /* Read VFC RAM read response */
3194 s_storm_defs[storm_id].sem_fast_mem_addr +
3195 SEM_FAST_REG_VFC_DATA_RD,
3196 dump_buf + offset, VFC_RAM_RESP_DWORDS);
3202 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3203 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3204 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3206 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* PSTORM VFC is dumped only on the ASIC platform (see condition below) */
3210 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3211 if (qed_grc_is_storm_included(p_hwfn,
3212 (enum dbg_storms)storm_id) &&
3213 s_storm_defs[storm_id].has_vfc &&
3214 (storm_id != DBG_PSTORM_ID ||
3215 dev_data->platform_id == PLATFORM_ASIC)) {
3217 offset += qed_grc_dump_vfc_cam(p_hwfn,
3223 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3224 offset += qed_grc_dump_vfc_ram(p_hwfn,
3238 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3239 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3240 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3242 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3246 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3247 struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
3248 u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
3249 u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
3250 u32 total_dwords = (num_entries * entry_width) / 32;
3251 u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
/* 16-bit entries are marked packed in the dump header */
3252 bool packed = (entry_width == 16);
3253 u32 rss_addr = rss_defs->addr;
3256 offset += qed_grc_dump_mem_hdr(p_hwfn,
3264 rss_defs->type_name, false, 0);
3267 offset += total_dwords;
/* Indirect read: select the row via RAM_ADDR, then read the DATA window */
3272 for (i = 0; i < total_dwords;
3273 i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
3274 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3275 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3276 offset += qed_grc_dump_addr_range(p_hwfn,
3289 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3290 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3291 struct qed_ptt *p_ptt,
3292 u32 *dump_buf, bool dump, u8 big_ram_id)
3294 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3295 u32 total_blocks, ram_size, offset = 0, i;
3296 char mem_name[12] = "???_BIG_RAM";
3297 char type_name[8] = "???_RAM";
3298 struct big_ram_defs *big_ram;
3300 big_ram = &s_big_ram_defs[big_ram_id];
3301 total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
3302 ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
/* NOTE(review): strncpy with strlen(src) does NOT NUL-terminate;
 * this relies on the "???" placeholder in the pre-initialized templates
 * having the same length as instance_name. Consider strscpy/snprintf.
 */
3304 strncpy(type_name, big_ram->instance_name,
3305 strlen(big_ram->instance_name));
3306 strncpy(mem_name, big_ram->instance_name,
3307 strlen(big_ram->instance_name));
3309 /* Dump memory header */
3310 offset += qed_grc_dump_mem_hdr(p_hwfn,
3316 BIG_RAM_BLOCK_SIZE_BYTES * 8,
3317 false, type_name, false, 0);
3320 return offset + ram_size;
3322 /* Read and dump Big RAM data - two blocks are read per address write */
3323 for (i = 0; i < total_blocks / 2; i++) {
3326 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3327 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3328 len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
3329 offset += qed_grc_dump_addr_range(p_hwfn,
3340 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3341 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3343 bool block_enable[MAX_BLOCK_ID] = { 0 };
3344 u32 offset = 0, addr;
3345 bool halted = false;
/* Halt the MCP while dumping, unless NO_MCP was requested */
3348 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3349 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3351 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3354 /* Dump MCP scratchpad */
3355 offset += qed_grc_dump_mem(p_hwfn,
3360 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3361 MCP_REG_SCRATCH_SIZE,
3362 0, false, "MCP", false, 0);
3364 /* Dump MCP cpu_reg_file */
3365 offset += qed_grc_dump_mem(p_hwfn,
3370 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3371 MCP_REG_CPU_REG_FILE_SIZE,
3372 0, false, "MCP", false, 0);
3374 /* Dump MCP registers */
3375 block_enable[BLOCK_MCP] = true;
3376 offset += qed_grc_dump_registers(p_hwfn,
3379 dump, block_enable, "block", "MCP");
3381 /* Dump required non-MCP registers */
3382 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3383 dump, 1, "eng", -1, "block", "MCP");
3384 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3385 offset += qed_grc_dump_reg_entry(p_hwfn,
/* Resume the MCP only if we actually halted it above */
3393 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3394 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3398 /* Dumps the tbus indirect memory for all PHYs. */
3399 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3400 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3402 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3406 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3407 struct phy_defs *phy_defs = &s_phy_defs[phy_id];
3410 printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3411 phy_defs->phy_name);
3412 if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
3414 "Unexpected debug error: invalid PHY memory name\n");
3415 offset += qed_grc_dump_mem_hdr(p_hwfn,
3420 PHY_DUMP_SIZE_DWORDS,
3421 16, true, mem_name, false, 0);
3423 u32 addr_lo_addr = phy_defs->base_addr +
3424 phy_defs->tbus_addr_lo_addr;
3425 u32 addr_hi_addr = phy_defs->base_addr +
3426 phy_defs->tbus_addr_hi_addr;
3427 u32 data_lo_addr = phy_defs->base_addr +
3428 phy_defs->tbus_data_lo_addr;
3429 u32 data_hi_addr = phy_defs->base_addr +
3430 phy_defs->tbus_data_hi_addr;
3431 u8 *bytes_buf = (u8 *)(dump_buf + offset);
/* tbus address is split: hi selects a 256-entry window,
 * lo selects the entry within it; two data bytes read per address.
 */
3433 for (tbus_hi_offset = 0;
3434 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3437 p_ptt, addr_hi_addr, tbus_hi_offset);
3438 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3442 addr_lo_addr, tbus_lo_offset);
3444 (u8)qed_rd(p_hwfn, p_ptt,
3447 (u8)qed_rd(p_hwfn, p_ptt,
3453 offset += PHY_DUMP_SIZE_DWORDS;
/* Configures one debug-bus line for the given block:
 * line select, cycle enable, right shift, and force valid/frame.
 */
3459 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3460 struct qed_ptt *p_ptt,
3461 enum block_id block_id,
3464 u8 right_shift, u8 force_valid, u8 force_frame)
3466 struct block_defs *p_block_defs = s_block_defs[block_id];
3468 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
3469 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
3470 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
3471 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
3472 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
3475 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3476 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3477 struct qed_ptt *p_ptt,
3478 u32 *dump_buf, bool dump)
3480 u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
3481 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3482 u32 offset = 0, block_id, line_id;
3483 struct block_defs *p_block_defs;
3487 QED_MSG_DEBUG, "Dumping static debug data...\n");
3489 /* Disable all blocks debug output */
3490 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3491 p_block_defs = s_block_defs[block_id];
3493 if (p_block_defs->has_dbg_bus[dev_data->chip_id])
3494 qed_wr(p_hwfn, p_ptt,
3495 p_block_defs->dbg_cycle_enable_addr, 0);
/* Reset the debug block and route its output to the internal buffer */
3498 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3499 qed_bus_set_framing_mode(p_hwfn,
3500 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3502 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3503 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3504 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3507 /* Dump all static debug lines for each relevant block */
3508 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3509 p_block_defs = s_block_defs[block_id];
3511 if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
3514 /* Dump static section params */
3515 offset += qed_grc_dump_mem_hdr(p_hwfn,
3518 p_block_defs->name, 0,
3519 block_dwords, 32, false,
3520 "STATIC", false, 0);
3522 if (dump && !dev_data->block_in_reset[block_id]) {
3524 p_block_defs->dbg_client_id[dev_data->chip_id];
3525 u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3526 u32 len = STATIC_DEBUG_LINE_DWORDS;
3528 /* Enable block's client */
3529 qed_bus_enable_clients(p_hwfn, p_ptt,
3530 BIT(dbg_client_id));
3532 for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
3534 /* Configure debug line ID */
3535 qed_config_dbg_line(p_hwfn,
3537 (enum block_id)block_id,
3541 /* Read debug line info */
3543 qed_grc_dump_addr_range(p_hwfn,
3551 /* Disable block's client and debug output */
3552 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3553 qed_wr(p_hwfn, p_ptt,
3554 p_block_defs->dbg_cycle_enable_addr, 0);
3556 /* All lines are invalid - dump zeros */
3558 memset(dump_buf + offset, 0,
3559 DWORDS_TO_BYTES(block_dwords));
3560 offset += block_dwords;
/* Leave the debug block and all clients disabled on exit */
3565 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3566 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3572 /* Performs GRC Dump to the specified buffer.
3573 * Returns the dumped size in dwords.
3575 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3576 struct qed_ptt *p_ptt,
3578 bool dump, u32 *num_dumped_dwords)
3580 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3581 bool parities_masked = false;
3582 u8 i, port_mode = 0;
3585 *num_dumped_dwords = 0;
3587 /* Find port mode */
3589 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3602 /* Update reset state */
3604 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3606 /* Dump global params */
3607 offset += qed_dump_common_global_params(p_hwfn,
3609 dump_buf + offset, dump, 4);
3610 offset += qed_dump_str_param(dump_buf + offset,
3611 dump, "dump-type", "grc-dump");
3612 offset += qed_dump_num_param(dump_buf + offset,
3615 qed_grc_get_param(p_hwfn,
3616 DBG_GRC_PARAM_NUM_LCIDS));
3617 offset += qed_dump_num_param(dump_buf + offset,
3620 qed_grc_get_param(p_hwfn,
3621 DBG_GRC_PARAM_NUM_LTIDS));
3622 offset += qed_dump_num_param(dump_buf + offset,
3623 dump, "num-ports", port_mode);
3625 /* Dump reset registers (dumped before taking blocks out of reset ) */
3626 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3627 offset += qed_grc_dump_reset_regs(p_hwfn,
3629 dump_buf + offset, dump);
3631 /* Take all blocks out of reset (using reset registers) */
3633 qed_grc_unreset_blocks(p_hwfn, p_ptt);
3634 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3637 /* Disable all parities using MFW command */
3638 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3639 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3640 if (!parities_masked) {
3642 "Failed to mask parities using MFW\n");
/* PARITY_SAFE makes the failure fatal; otherwise best-effort */
3643 if (qed_grc_get_param
3644 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3645 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3649 /* Dump modified registers (dumped before modifying them) */
3650 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3651 offset += qed_grc_dump_modified_regs(p_hwfn,
3653 dump_buf + offset, dump);
/* Stall storms when IOR or VFC dumping is requested */
3657 (qed_grc_is_included(p_hwfn,
3658 DBG_GRC_PARAM_DUMP_IOR) ||
3659 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3660 qed_grc_stall_storms(p_hwfn, p_ptt, true);
3663 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3664 /* Dump all blocks except MCP */
3665 bool block_enable[MAX_BLOCK_ID];
3667 for (i = 0; i < MAX_BLOCK_ID; i++)
3668 block_enable[i] = true;
3669 block_enable[BLOCK_MCP] = false;
3670 offset += qed_grc_dump_registers(p_hwfn,
3675 block_enable, NULL, NULL);
3677 /* Dump special registers */
3678 offset += qed_grc_dump_special_regs(p_hwfn,
3680 dump_buf + offset, dump);
3684 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3687 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3688 offset += qed_grc_dump_mcp(p_hwfn,
3689 p_ptt, dump_buf + offset, dump);
3692 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3693 offset += qed_grc_dump_ctx(p_hwfn,
3694 p_ptt, dump_buf + offset, dump);
3696 /* Dump RSS memories */
3697 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3698 offset += qed_grc_dump_rss(p_hwfn,
3699 p_ptt, dump_buf + offset, dump);
3702 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3703 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3704 offset += qed_grc_dump_big_ram(p_hwfn,
3710 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
3711 offset += qed_grc_dump_iors(p_hwfn,
3712 p_ptt, dump_buf + offset, dump);
3715 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
3716 offset += qed_grc_dump_vfc(p_hwfn,
3717 p_ptt, dump_buf + offset, dump);
/* PHY tbus dump is restricted to K2 on the ASIC platform */
3720 if (qed_grc_is_included(p_hwfn,
3721 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3722 CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
3723 offset += qed_grc_dump_phy(p_hwfn,
3724 p_ptt, dump_buf + offset, dump);
3726 /* Dump static debug data (only while the debug bus is idle) */
3727 if (qed_grc_is_included(p_hwfn,
3728 DBG_GRC_PARAM_DUMP_STATIC) &&
3729 dev_data->bus.state == DBG_BUS_STATE_IDLE)
3730 offset += qed_grc_dump_static_debug(p_hwfn,
3732 dump_buf + offset, dump);
3734 /* Dump last section */
3735 offset += qed_dump_last_section(dump_buf, offset, dump);
3737 /* Unstall storms */
3738 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3739 qed_grc_stall_storms(p_hwfn, p_ptt, false);
3741 /* Clear parity status */
3742 qed_grc_clear_all_prty(p_hwfn, p_ptt);
3744 /* Enable all parities using MFW command */
3745 if (parities_masked)
3746 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3749 *num_dumped_dwords = offset;
3751 return DBG_STATUS_OK;
3754 /* Writes the specified failing Idle Check rule to the specified buffer.
3755 * Returns the dumped size in dwords.
3757 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3758 struct qed_ptt *p_ptt,
3763 const struct dbg_idle_chk_rule *rule,
3764 u16 fail_entry_id, u32 *cond_reg_values)
3766 const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *)
3768 [BIN_BUF_DBG_IDLE_CHK_REGS].
3769 ptr)[rule->reg_offset];
/* NOTE(review): "®s" below is a mojibake of "&regs" ('&reg' rendered
 * as the HTML entity); restore to "&regs" in the real source.
 */
3770 const struct dbg_idle_chk_cond_reg *cond_regs = ®s[0].cond_reg;
3771 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3772 struct dbg_idle_chk_result_hdr *hdr =
3773 (struct dbg_idle_chk_result_hdr *)dump_buf;
3774 const struct dbg_idle_chk_info_reg *info_regs =
3775 ®s[rule->num_cond_regs].info_reg;
3776 u32 next_reg_offset = 0, i, offset = 0;
3779 /* Dump rule data */
3781 memset(hdr, 0, sizeof(*hdr));
3782 hdr->rule_id = rule_id;
3783 hdr->mem_entry_id = fail_entry_id;
3784 hdr->severity = rule->severity;
3785 hdr->num_dumped_cond_regs = rule->num_cond_regs;
3788 offset += IDLE_CHK_RESULT_HDR_DWORDS;
3790 /* Dump condition register values (already read by the caller) */
3791 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3792 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3794 /* Write register header */
3796 struct dbg_idle_chk_result_reg_hdr *reg_hdr =
3797 (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
3799 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3801 sizeof(struct dbg_idle_chk_result_reg_hdr));
3802 reg_hdr->start_entry = reg->start_entry;
3803 reg_hdr->size = reg->entry_size;
3804 SET_FIELD(reg_hdr->data,
3805 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3806 reg->num_entries > 1 || reg->start_entry > 0
3808 SET_FIELD(reg_hdr->data,
3809 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3811 /* Write register values */
3812 for (i = 0; i < reg_hdr->size;
3813 i++, next_reg_offset++, offset++)
3815 cond_reg_values[next_reg_offset];
3817 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3822 /* Dump info register values */
3823 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3824 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
/* NOTE(review): size-only accounting path - guard not visible here */
3828 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3832 /* Check if register's block is in reset */
3833 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3834 if (block_id >= MAX_BLOCK_ID) {
3835 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3839 if (!dev_data->block_in_reset[block_id]) {
3840 bool eval_mode = GET_FIELD(reg->mode.data,
3841 DBG_MODE_HDR_EVAL_MODE) > 0;
3842 bool mode_match = true;
3846 u16 modes_buf_offset =
3847 GET_FIELD(reg->mode.data,
3848 DBG_MODE_HDR_MODES_BUF_OFFSET);
3850 qed_is_mode_match(p_hwfn,
3856 GET_FIELD(reg->data,
3857 DBG_IDLE_CHK_INFO_REG_ADDRESS);
3859 /* Write register header */
3860 struct dbg_idle_chk_result_reg_hdr *reg_hdr =
3861 (struct dbg_idle_chk_result_reg_hdr *)
3862 (dump_buf + offset);
3864 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3865 hdr->num_dumped_info_regs++;
3866 memset(reg_hdr, 0, sizeof(*reg_hdr));
3867 reg_hdr->size = reg->size;
3868 SET_FIELD(reg_hdr->data,
3869 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
/* Info regs are numbered after all condition regs */
3870 rule->num_cond_regs + reg_id);
3872 /* Write register values */
3874 qed_grc_dump_addr_range(p_hwfn,
3887 /* Dumps idle check rule entries. Returns the dumped size in dwords.
 *
 * For every input rule: skips rules whose condition-register blocks are in
 * reset, reads the current entry of each condition register via DMA-style
 * range reads, evaluates the rule's condition function (true == failure),
 * and dumps each failing entry with qed_idle_chk_dump_failure().
 *
 * FIX(review): "cond_regs = &regs[0].cond_reg;" was corrupted to the single
 * character U+00AE (an "&reg;" HTML-entity collapse); restored below.
 */
3889 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3890 u32 *dump_buf, bool dump,
3891 const struct dbg_idle_chk_rule *input_rules,
3892 u32 num_input_rules, u32 *num_failing_rules)
3894 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3895 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3900 *num_failing_rules = 0;
3901 for (i = 0; i < num_input_rules; i++) {
3902 const struct dbg_idle_chk_cond_reg *cond_regs;
3903 const struct dbg_idle_chk_rule *rule;
3904 const union dbg_idle_chk_reg *regs;
3905 u16 num_reg_entries = 1;
3906 bool check_rule = true;
3907 const u32 *imm_values;
3909 rule = &input_rules[i];
3910 regs = &((const union dbg_idle_chk_reg *)
3911 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
3913 cond_regs = &regs[0].cond_reg;
3914 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
3917 /* Check if all condition register blocks are out of reset, and
3918 * find maximal number of entries (all condition registers that
3919 * are memories must have the same size, which is > 1).
3921 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3923 u32 block_id = GET_FIELD(cond_regs[reg_id].data,
3924 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3926 if (block_id >= MAX_BLOCK_ID) {
3927 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3931 check_rule = !dev_data->block_in_reset[block_id];
3932 if (cond_regs[reg_id].num_entries > num_reg_entries)
3933 num_reg_entries = cond_regs[reg_id].num_entries;
3936 if (!check_rule && dump)
/* Size-estimation path: assume worst case (all entries fail). */
3940 u32 entry_dump_size =
3941 qed_idle_chk_dump_failure(p_hwfn,
3950 offset += num_reg_entries * entry_dump_size;
3951 (*num_failing_rules) += num_reg_entries;
3955 /* Go over all register entries (number of entries is the same
3956 * for all condition registers).
3958 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3959 /* Read current entry of all condition registers */
3960 u32 next_reg_offset = 0;
3962 for (reg_id = 0; reg_id < rule->num_cond_regs;
3964 const struct dbg_idle_chk_cond_reg *reg =
3967 /* Find GRC address (if it's a memory,the
3968 * address of the specific entry is calculated).
3971 GET_FIELD(reg->data,
3972 DBG_IDLE_CHK_COND_REG_ADDRESS);
3974 if (reg->num_entries > 1 ||
3975 reg->start_entry > 0) {
3976 u32 padded_entry_size =
3977 reg->entry_size > 1 ?
3978 roundup_pow_of_two(reg->entry_size) :
3981 addr += (reg->start_entry + entry_id) *
3985 /* Read registers */
3986 if (next_reg_offset + reg->entry_size >=
3987 IDLE_CHK_MAX_ENTRIES_SIZE) {
3989 "idle check registers entry is too large\n");
3994 qed_grc_dump_addr_range(p_hwfn,
4002 /* Call rule's condition function - a return value of
4003 * true indicates failure.
4005 if ((*cond_arr[rule->cond_id])(cond_reg_values,
4008 qed_idle_chk_dump_failure(p_hwfn,
4016 (*num_failing_rules)++;
4025 /* Performs Idle Check Dump to the specified buffer.
4026 * Returns the dumped size in dwords.
 *
 * Layout: common global params, "dump-type"=idle-chk, then an "idle_chk"
 * section whose single "num_rules" param is written as 0 first and
 * overwritten at the end once the real failing-rule count is known.
 * Rules are taken from the BIN_BUF_DBG_IDLE_CHK_RULES binary array and
 * each cond_hdr group is evaluated only if its mode matches the chip.
 */
4028 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4029 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4031 u32 offset = 0, input_offset = 0, num_failing_rules = 0;
4032 u32 num_failing_rules_offset;
4034 /* Dump global params */
4035 offset += qed_dump_common_global_params(p_hwfn,
4037 dump_buf + offset, dump, 1);
4038 offset += qed_dump_str_param(dump_buf + offset,
4039 dump, "dump-type", "idle-chk");
4041 /* Dump idle check section header with a single parameter */
4042 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4043 num_failing_rules_offset = offset;
4044 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4045 while (input_offset <
4046 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4047 const struct dbg_idle_chk_cond_hdr *cond_hdr =
4048 (const struct dbg_idle_chk_cond_hdr *)
4049 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4051 bool eval_mode = GET_FIELD(cond_hdr->mode.data,
4052 DBG_MODE_HDR_EVAL_MODE) > 0;
4053 bool mode_match = true;
/* Check if the rule group applies to the current chip mode. */
4057 u16 modes_buf_offset =
4058 GET_FIELD(cond_hdr->mode.data,
4059 DBG_MODE_HDR_MODES_BUF_OFFSET);
4061 mode_match = qed_is_mode_match(p_hwfn,
4066 u32 curr_failing_rules;
4069 qed_idle_chk_dump_rule_entries(p_hwfn,
4073 (const struct dbg_idle_chk_rule *)
4074 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4076 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4077 &curr_failing_rules);
4078 num_failing_rules += curr_failing_rules;
4081 input_offset += cond_hdr->data_size;
4084 /* Overwrite num_rules parameter */
4086 qed_dump_num_param(dump_buf + num_failing_rules_offset,
4087 dump, "num_rules", num_failing_rules);
4092 /* Finds the meta data image in NVRAM.
 *
 * Issues the MCP NVM_GET_FILE_ATT command for the requested image type and,
 * on success, returns the image's NVRAM byte offset and length through the
 * output pointers. Fails with DBG_STATUS_NVRAM_GET_IMAGE_FAILED when the
 * MCP command fails or responds with anything but FW_MSG_CODE_NVM_OK, and
 * with DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE when the length is not a
 * multiple of 4 bytes (callers read it in dwords).
 */
4093 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4094 struct qed_ptt *p_ptt,
4096 u32 *nvram_offset_bytes,
4097 u32 *nvram_size_bytes)
4099 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4100 struct mcp_file_att file_att;
4102 /* Call NVRAM get file command */
4103 int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4105 DRV_MSG_CODE_NVM_GET_FILE_ATT,
4112 /* Check response */
4114 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4115 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4117 /* Update return values */
4118 *nvram_offset_bytes = file_att.nvm_start_addr;
4119 *nvram_size_bytes = file_att.len;
4122 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4123 image_type, *nvram_offset_bytes, *nvram_size_bytes);
4125 /* Check alignment */
4126 if (*nvram_size_bytes & 0x3)
4127 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4128 return DBG_STATUS_OK;
/* Reads nvram_size_bytes from NVRAM at nvram_offset_bytes into ret_buf.
 *
 * Reads in chunks of at most MCP_DRV_NVM_BUF_LEN bytes via the MCP
 * NVM_READ_NVRAM command, advancing read_offset until bytes_left is
 * exhausted. Returns DBG_STATUS_NVRAM_READ_FAILED if any chunk command
 * fails or the MCP response is not FW_MSG_CODE_NVM_OK.
 * NOTE(review): caller must ensure ret_buf holds nvram_size_bytes bytes.
 */
4131 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4132 struct qed_ptt *p_ptt,
4133 u32 nvram_offset_bytes,
4134 u32 nvram_size_bytes, u32 *ret_buf)
4136 u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
4137 u32 bytes_to_copy, read_offset = 0;
4138 s32 bytes_left = nvram_size_bytes;
4142 "nvram_read: reading image of size %d bytes from NVRAM\n",
/* Clamp each transaction to the MCP mailbox buffer size. */
4147 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4149 /* Call NVRAM read command */
4150 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4151 DRV_MSG_CODE_NVM_READ_NVRAM,
4152 (nvram_offset_bytes +
4155 DRV_MB_PARAM_NVM_LEN_SHIFT),
4156 &ret_mcp_resp, &ret_mcp_param,
4158 (u32 *)((u8 *)ret_buf +
4160 return DBG_STATUS_NVRAM_READ_FAILED;
4162 /* Check response */
4163 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4164 return DBG_STATUS_NVRAM_READ_FAILED;
4166 /* Update read offset */
4167 read_offset += ret_read_size;
4168 bytes_left -= ret_read_size;
4169 } while (bytes_left > 0);
4171 return DBG_STATUS_OK;
4174 /* Get info on the MCP Trace data in the scratchpad:
4175 * - trace_data_grc_addr - the GRC address of the trace data
4176 * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
 *
 * Validates the section by comparing the stored signature against
 * MFW_TRACE_SIGNATURE; returns DBG_STATUS_INVALID_TRACE_SIGNATURE on
 * mismatch, DBG_STATUS_OK otherwise.
 */
4179 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4180 struct qed_ptt *p_ptt,
4181 u32 *trace_data_grc_addr,
4182 u32 *trace_data_size_bytes)
4184 /* Read MCP trace section offsize structure from MCP scratchpad */
4185 u32 spad_trace_offsize = qed_rd(p_hwfn,
4187 MCP_SPAD_TRACE_OFFSIZE_ADDR);
4190 /* Extract MCP trace section GRC address from offsize structure (within
4193 *trace_data_grc_addr =
4194 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4196 /* Read signature from MCP trace section */
4197 signature = qed_rd(p_hwfn, p_ptt,
4198 *trace_data_grc_addr +
4199 offsetof(struct mcp_trace, signature));
4200 if (signature != MFW_TRACE_SIGNATURE)
4201 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4203 /* Read trace size from MCP trace section */
4204 *trace_data_size_bytes = qed_rd(p_hwfn,
4206 *trace_data_grc_addr +
4207 offsetof(struct mcp_trace, size));
4208 return DBG_STATUS_OK;
4211 /* Reads MCP trace meta data image from NVRAM.
4212 * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
4214 * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
4215 * Trace meta data starts (invalid when loaded from file)
4216 * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
 *
 * The running bundle ID is stored in the scratchpad right after the trace
 * data section; only values 0 and 1 are valid
 * (DBG_STATUS_INVALID_NVRAM_BUNDLE otherwise). The ID selects between the
 * NVM_TYPE_MFW_TRACE1/NVM_TYPE_MFW_TRACE2 NVRAM images.
 */
4218 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4219 struct qed_ptt *p_ptt,
4220 u32 trace_data_size_bytes,
4221 u32 *running_bundle_id,
4222 u32 *trace_meta_offset_bytes,
4223 u32 *trace_meta_size_bytes)
4225 /* Read MCP trace section offsize structure from MCP scratchpad */
4226 u32 spad_trace_offsize = qed_rd(p_hwfn,
4228 MCP_SPAD_TRACE_OFFSIZE_ADDR);
4230 /* Find running bundle ID */
4231 u32 running_mfw_addr =
4232 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4233 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4234 u32 nvram_image_type;
4236 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4237 if (*running_bundle_id > 1)
4238 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4240 /* Find image in NVRAM */
4242 (*running_bundle_id ==
4243 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4245 return qed_find_nvram_image(p_hwfn,
4248 trace_meta_offset_bytes,
4249 trace_meta_size_bytes);
4252 /* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
 *
 * After reading, validates the image layout: a leading 32-bit
 * MCP_TRACE_META_IMAGE_SIGNATURE, a module count byte followed by
 * length-prefixed module-name records, then a second signature. Returns
 * DBG_STATUS_INVALID_TRACE_SIGNATURE if either signature is wrong.
 */
4255 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4256 struct qed_ptt *p_ptt,
4257 u32 nvram_offset_in_bytes,
4258 u32 size_in_bytes, u32 *buf)
4260 u8 *byte_buf = (u8 *)buf;
4264 /* Read meta data from NVRAM */
4265 enum dbg_status status = qed_nvram_read(p_hwfn,
4267 nvram_offset_in_bytes,
4271 if (status != DBG_STATUS_OK)
4274 /* Extract and check first signature */
4275 signature = qed_read_unaligned_dword(byte_buf);
4276 byte_buf += sizeof(u32);
4277 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
4278 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4280 /* Extract number of modules */
4281 modules_num = *(byte_buf++);
4283 /* Skip all modules */
4284 for (i = 0; i < modules_num; i++) {
/* Each module record is a length byte followed by that many bytes. */
4285 u8 module_len = *(byte_buf++);
4287 byte_buf += module_len;
4290 /* Extract and check second signature */
4291 signature = qed_read_unaligned_dword(byte_buf);
4292 byte_buf += sizeof(u32);
4293 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
4294 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4295 return DBG_STATUS_OK;
4298 /* Dump MCP Trace */
/* Dumps global params, the scratchpad trace data section (read while the
 * MCP is halted for consistency, best-effort if halt fails), and the trace
 * meta section read from NVRAM. With dump==false only computes sizes.
 * Returns NVRAM_GET_IMAGE_FAILED when MCP access is disabled via the
 * DBG_GRC_PARAM_NO_MCP GRC param, to signal the meta data is absent.
 */
4299 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4300 struct qed_ptt *p_ptt,
4302 bool dump, u32 *num_dumped_dwords)
4304 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4305 u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4306 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4307 enum dbg_status status;
4311 mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4313 *num_dumped_dwords = 0;
4315 /* Get trace data info */
4316 status = qed_mcp_trace_get_data_info(p_hwfn,
4318 &trace_data_grc_addr,
4319 &trace_data_size_bytes);
4320 if (status != DBG_STATUS_OK)
4323 /* Dump global params */
4324 offset += qed_dump_common_global_params(p_hwfn,
4326 dump_buf + offset, dump, 1);
4327 offset += qed_dump_str_param(dump_buf + offset,
4328 dump, "dump-type", "mcp-trace");
4330 /* Halt MCP while reading from scratchpad so the read data will be
4331 * consistent if halt fails, MCP trace is taken anyway, with a small
4332 * risk that it may be corrupt.
4334 if (dump && mcp_access) {
4335 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4337 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4340 /* Find trace data size */
4341 trace_data_size_dwords =
4342 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4345 /* Dump trace data section header and param */
4346 offset += qed_dump_section_hdr(dump_buf + offset,
4347 dump, "mcp_trace_data", 1);
4348 offset += qed_dump_num_param(dump_buf + offset,
4349 dump, "size", trace_data_size_dwords);
4351 /* Read trace data from scratchpad into dump buffer */
4352 offset += qed_grc_dump_addr_range(p_hwfn,
4356 BYTES_TO_DWORDS(trace_data_grc_addr),
4357 trace_data_size_dwords);
4359 /* Resume MCP (only if halt succeeded) */
4360 if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
4361 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4363 /* Dump trace meta section header */
4364 offset += qed_dump_section_hdr(dump_buf + offset,
4365 dump, "mcp_trace_meta", 1);
4367 /* Read trace meta info */
4369 status = qed_mcp_trace_get_meta_info(p_hwfn,
4371 trace_data_size_bytes,
4373 &trace_meta_offset_bytes,
4374 &trace_meta_size_bytes);
4375 if (status == DBG_STATUS_OK)
4376 trace_meta_size_dwords =
4377 BYTES_TO_DWORDS(trace_meta_size_bytes);
4380 /* Dump trace meta size param */
4381 offset += qed_dump_num_param(dump_buf + offset,
4382 dump, "size", trace_meta_size_dwords);
4384 /* Read trace meta image into dump buffer */
4385 if (dump && trace_meta_size_dwords)
4386 status = qed_mcp_trace_read_meta(p_hwfn,
4388 trace_meta_offset_bytes,
4389 trace_meta_size_bytes,
4391 if (status == DBG_STATUS_OK)
4392 offset += trace_meta_size_dwords;
4394 *num_dumped_dwords = offset;
4396 /* If no mcp access, indicate that the dump doesn't contain the meta
4399 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Dumps the GRC trace (reg) FIFO into dump_buf.
 *
 * Layout: global params, "dump-type"=reg-fifo, then a "reg_fifo_data"
 * section whose "size" param is written as 0 and overwritten after the
 * FIFO has been drained. When dump==false, reserves the worst case
 * (REG_FIFO_DEPTH_DWORDS) because the FIFO depth cannot be queried.
 */
4403 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4404 struct qed_ptt *p_ptt,
4406 bool dump, u32 *num_dumped_dwords)
4408 u32 offset = 0, dwords_read, size_param_offset;
4411 *num_dumped_dwords = 0;
4413 /* Dump global params */
4414 offset += qed_dump_common_global_params(p_hwfn,
4416 dump_buf + offset, dump, 1);
4417 offset += qed_dump_str_param(dump_buf + offset,
4418 dump, "dump-type", "reg-fifo");
4420 /* Dump fifo data section header and param. The size param is 0 for now,
4421 * and is overwritten after reading the FIFO.
4423 offset += qed_dump_section_hdr(dump_buf + offset,
4424 dump, "reg_fifo_data", 1);
4425 size_param_offset = offset;
4426 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4429 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4430 * test how much data is available, except for reading it.
4432 offset += REG_FIFO_DEPTH_DWORDS;
4433 *num_dumped_dwords = offset;
4434 return DBG_STATUS_OK;
4437 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4438 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4440 /* Pull available data from fifo. Use DMAE since this is widebus memory
4441 * and must be accessed atomically. Test for dwords_read not passing
4442 * buffer size since more entries could be added to the buffer as we are
4445 for (dwords_read = 0;
4446 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4447 dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
4448 REG_FIFO_ELEMENT_DWORDS) {
4449 if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
4450 (u64)(uintptr_t)(&dump_buf[offset]),
4451 REG_FIFO_ELEMENT_DWORDS, 0))
4452 return DBG_STATUS_DMAE_FAILED;
4453 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4454 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4457 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4460 *num_dumped_dwords = offset;
4461 return DBG_STATUS_OK;
/* Dumps the IGU error-handling FIFO into dump_buf.
 *
 * Mirrors qed_reg_fifo_dump(): global params, "dump-type"=igu-fifo, an
 * "igu_fifo_data" section with a placeholder "size" param overwritten
 * after draining the FIFO via DMAE. With dump==false reserves the worst
 * case (IGU_FIFO_DEPTH_DWORDS) since the fill level cannot be queried.
 */
4465 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4466 struct qed_ptt *p_ptt,
4468 bool dump, u32 *num_dumped_dwords)
4470 u32 offset = 0, dwords_read, size_param_offset;
4473 *num_dumped_dwords = 0;
4475 /* Dump global params */
4476 offset += qed_dump_common_global_params(p_hwfn,
4478 dump_buf + offset, dump, 1);
4479 offset += qed_dump_str_param(dump_buf + offset,
4480 dump, "dump-type", "igu-fifo");
4482 /* Dump fifo data section header and param. The size param is 0 for now,
4483 * and is overwritten after reading the FIFO.
4485 offset += qed_dump_section_hdr(dump_buf + offset,
4486 dump, "igu_fifo_data", 1);
4487 size_param_offset = offset;
4488 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4491 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4492 * test how much data is available, except for reading it.
4494 offset += IGU_FIFO_DEPTH_DWORDS;
4495 *num_dumped_dwords = offset;
4496 return DBG_STATUS_OK;
4499 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4500 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4502 /* Pull available data from fifo. Use DMAE since this is widebus memory
4503 * and must be accessed atomically. Test for dwords_read not passing
4504 * buffer size since more entries could be added to the buffer as we are
4507 for (dwords_read = 0;
4508 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4509 dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
4510 IGU_FIFO_ELEMENT_DWORDS) {
4511 if (qed_dmae_grc2host(p_hwfn, p_ptt,
4512 IGU_REG_ERROR_HANDLING_MEMORY,
4513 (u64)(uintptr_t)(&dump_buf[offset]),
4514 IGU_FIFO_ELEMENT_DWORDS, 0))
4515 return DBG_STATUS_DMAE_FAILED;
4516 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4517 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4520 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4523 *num_dumped_dwords = offset;
4524 return DBG_STATUS_OK;
4527 /* Protection Override dump */
/* Dumps the GRC protection-override window: global params,
 * "dump-type"=protection-override, then a "protection_override_data"
 * section sized by GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW entries of
 * PROTECTION_OVERRIDE_ELEMENT_DWORDS each, copied via DMAE. With
 * dump==false reserves PROTECTION_OVERRIDE_DEPTH_DWORDS (worst case).
 */
4528 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4529 struct qed_ptt *p_ptt,
4532 u32 *num_dumped_dwords)
4534 u32 offset = 0, size_param_offset, override_window_dwords;
4536 *num_dumped_dwords = 0;
4538 /* Dump global params */
4539 offset += qed_dump_common_global_params(p_hwfn,
4541 dump_buf + offset, dump, 1);
4542 offset += qed_dump_str_param(dump_buf + offset,
4543 dump, "dump-type", "protection-override");
4545 /* Dump data section header and param. The size param is 0 for now, and
4546 * is overwritten after reading the data.
4548 offset += qed_dump_section_hdr(dump_buf + offset,
4549 dump, "protection_override_data", 1);
4550 size_param_offset = offset;
4551 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4554 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4555 *num_dumped_dwords = offset;
4556 return DBG_STATUS_OK;
4559 /* Add override window info to buffer */
4560 override_window_dwords =
4561 qed_rd(p_hwfn, p_ptt,
4562 GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4563 PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4564 if (qed_dmae_grc2host(p_hwfn, p_ptt,
4565 GRC_REG_PROTECTION_OVERRIDE_WINDOW,
4566 (u64)(uintptr_t)(dump_buf + offset),
4567 override_window_dwords, 0))
4568 return DBG_STATUS_DMAE_FAILED;
4569 offset += override_window_dwords;
4570 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4571 override_window_dwords);
4573 *num_dumped_dwords = offset;
4574 return DBG_STATUS_OK;
4577 /* Performs FW Asserts Dump to the specified buffer.
4578 * Returns the dumped size in dwords.
 *
 * For every Storm whose block is out of reset: reads the Storm's fw_info,
 * dumps an "fw_asserts" section (storm letter + element size), locates
 * the asserts ring in the Storm's fast-memory INT RAM, and dumps the most
 * recently written list element. Ends with an empty "last" section.
 */
4580 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4581 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4583 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4584 struct fw_asserts_ram_section *asserts;
4585 char storm_letter_str[2] = "?";
4586 struct fw_info fw_info;
4590 /* Dump global params */
4591 offset += qed_dump_common_global_params(p_hwfn,
4593 dump_buf + offset, dump, 1);
4594 offset += qed_dump_str_param(dump_buf + offset,
4595 dump, "dump-type", "fw-asserts");
4596 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4597 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4598 u32 last_list_idx, addr;
4600 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
4603 /* Read FW info for the current Storm */
4604 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4606 asserts = &fw_info.fw_asserts_section;
4608 /* Dump FW Asserts section header and params */
4609 storm_letter_str[0] = s_storm_defs[storm_id].letter;
4610 offset += qed_dump_section_hdr(dump_buf + offset, dump,
4612 offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
4614 offset += qed_dump_num_param(dump_buf + offset, dump, "size",
4615 asserts->list_element_dword_size);
4618 offset += asserts->list_element_dword_size;
4622 /* Read and dump FW Asserts data */
4623 fw_asserts_section_addr =
4624 s_storm_defs[storm_id].sem_fast_mem_addr +
4625 SEM_FAST_REG_INT_RAM +
4626 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4627 next_list_idx_addr =
4628 fw_asserts_section_addr +
4629 DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4630 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
/* next_list_idx points at the slot to be written next, so the
 * last written element is the one just before it (with wrap).
 */
4631 last_list_idx = (next_list_idx > 0
4633 : asserts->list_num_elements) - 1;
4634 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4635 asserts->list_dword_offset +
4636 last_list_idx * asserts->list_element_dword_size;
4638 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4641 asserts->list_element_dword_size);
4644 /* Dump last section */
4645 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
4649 /***************************** Public Functions *******************************/
/* Registers the firmware debug-data binary blob: slices it into the global
 * s_dbg_arrays[] table (one pointer + dword size per buffer type) using
 * the bin_buffer_hdr offset/length directory at the start of the blob.
 * NOTE(review): bin_ptr must stay valid for the driver's lifetime — the
 * arrays alias it rather than copy it.
 */
4651 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
4653 /* Convert binary data to debug arrays */
4654 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
4657 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4658 s_dbg_arrays[buf_id].ptr =
4659 (u32 *)(bin_ptr + buf_array[buf_id].offset);
4660 s_dbg_arrays[buf_id].size_in_dwords =
4661 BYTES_TO_DWORDS(buf_array[buf_id].length);
4664 return DBG_STATUS_OK;
4667 /* Assign default GRC param values */
/* Resets every GRC dump parameter to its per-chip default from
 * s_grc_param_defs[]; called after each dump to undo caller overrides.
 */
4668 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4670 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4673 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
4674 dev_data->grc.param_val[i] =
4675 s_grc_param_defs[i].default_val[dev_data->chip_id];
/* Computes the buffer size (dwords) needed for a GRC dump by running
 * qed_grc_dump() in size-only mode (dump=false, NULL buffer). Fails with
 * DBG_STATUS_DBG_ARRAY_NOT_SET if the required debug binary arrays were
 * not registered via qed_dbg_set_bin_ptr().
 */
4678 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4679 struct qed_ptt *p_ptt,
4682 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4685 if (status != DBG_STATUS_OK)
4687 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4688 !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
4689 !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
4690 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
4691 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
4692 return DBG_STATUS_DBG_ARRAY_NOT_SET;
4693 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a full GRC dump into dump_buf after validating the caller's
 * buffer is at least the size reported by
 * qed_dbg_grc_get_dump_buf_size(); GRC params are reverted to defaults
 * afterwards regardless of outcome.
 */
4696 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
4697 struct qed_ptt *p_ptt,
4699 u32 buf_size_in_dwords,
4700 u32 *num_dumped_dwords)
4702 u32 needed_buf_size_in_dwords;
4703 enum dbg_status status;
4705 status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
4706 &needed_buf_size_in_dwords);
4708 *num_dumped_dwords = 0;
4709 if (status != DBG_STATUS_OK)
4711 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4712 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4715 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
4717 /* Revert GRC params to their default */
4718 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (dwords) for an Idle Check dump. The size-only
 * qed_idle_chk_dump() pass is expensive, so the result is cached in
 * dev_data->idle_chk and reused on subsequent calls.
 */
4723 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4724 struct qed_ptt *p_ptt,
4727 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4728 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4731 if (status != DBG_STATUS_OK)
4733 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4734 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
4735 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
4736 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
4737 return DBG_STATUS_DBG_ARRAY_NOT_SET;
4738 if (!dev_data->idle_chk.buf_size_set) {
4739 dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
4742 dev_data->idle_chk.buf_size_set = true;
4745 *buf_size = dev_data->idle_chk.buf_size;
4746 return DBG_STATUS_OK;
/* Performs an Idle Check dump into dump_buf. Validates the buffer size,
 * refreshes the per-block reset state, runs the dump, and reverts GRC
 * params to their defaults.
 */
4749 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
4750 struct qed_ptt *p_ptt,
4752 u32 buf_size_in_dwords,
4753 u32 *num_dumped_dwords)
4755 u32 needed_buf_size_in_dwords;
4756 enum dbg_status status;
4758 status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
4759 &needed_buf_size_in_dwords);
4761 *num_dumped_dwords = 0;
4762 if (status != DBG_STATUS_OK)
4764 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4765 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4767 /* Update reset state */
4768 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4770 /* Idle Check Dump */
4771 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
4773 /* Revert GRC params to their default */
4774 qed_dbg_grc_set_params_default(p_hwfn);
4776 return DBG_STATUS_OK;
/* Computes the buffer size (dwords) for an MCP Trace dump by running
 * qed_mcp_trace_dump() in size-only mode.
 */
4779 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4780 struct qed_ptt *p_ptt,
4783 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4786 if (status != DBG_STATUS_OK)
4788 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an MCP Trace dump into dump_buf. NVRAM_GET_IMAGE_FAILED from
 * the size query is tolerated (the dump is still useful without the meta
 * image, e.g. when MCP access is disabled).
 */
4791 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4792 struct qed_ptt *p_ptt,
4794 u32 buf_size_in_dwords,
4795 u32 *num_dumped_dwords)
4797 u32 needed_buf_size_in_dwords;
4798 enum dbg_status status;
4800 /* validate buffer size */
4802 qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
4803 &needed_buf_size_in_dwords);
4805 if (status != DBG_STATUS_OK &&
4806 status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
4809 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4810 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4812 /* Update reset state */
4813 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4816 status = qed_mcp_trace_dump(p_hwfn,
4817 p_ptt, dump_buf, true, num_dumped_dwords);
4819 /* Revert GRC params to their default */
4820 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (dwords) for a reg FIFO dump by running
 * qed_reg_fifo_dump() in size-only mode (worst-case FIFO depth).
 */
4825 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4826 struct qed_ptt *p_ptt,
4829 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4832 if (status != DBG_STATUS_OK)
4834 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a reg FIFO dump into dump_buf: validates the buffer size,
 * refreshes the block reset state, drains the FIFO, and reverts GRC
 * params to their defaults.
 */
4837 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4838 struct qed_ptt *p_ptt,
4840 u32 buf_size_in_dwords,
4841 u32 *num_dumped_dwords)
4843 u32 needed_buf_size_in_dwords;
4844 enum dbg_status status;
4846 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
4847 &needed_buf_size_in_dwords);
4849 *num_dumped_dwords = 0;
4850 if (status != DBG_STATUS_OK)
4852 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4853 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4855 /* Update reset state */
4856 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4858 status = qed_reg_fifo_dump(p_hwfn,
4859 p_ptt, dump_buf, true, num_dumped_dwords);
4861 /* Revert GRC params to their default */
4862 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (dwords) for an IGU FIFO dump by running
 * qed_igu_fifo_dump() in size-only mode (worst-case FIFO depth).
 */
4867 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4868 struct qed_ptt *p_ptt,
4871 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4874 if (status != DBG_STATUS_OK)
4876 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an IGU FIFO dump into dump_buf: validates the buffer size,
 * refreshes the block reset state, drains the FIFO, and reverts GRC
 * params to their defaults.
 */
4879 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4880 struct qed_ptt *p_ptt,
4882 u32 buf_size_in_dwords,
4883 u32 *num_dumped_dwords)
4885 u32 needed_buf_size_in_dwords;
4886 enum dbg_status status;
4888 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
4889 &needed_buf_size_in_dwords);
4891 *num_dumped_dwords = 0;
4892 if (status != DBG_STATUS_OK)
4894 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4895 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4897 /* Update reset state */
4898 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4900 status = qed_igu_fifo_dump(p_hwfn,
4901 p_ptt, dump_buf, true, num_dumped_dwords);
4902 /* Revert GRC params to their default */
4903 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (dwords) for a protection-override dump by
 * running qed_protection_override_dump() in size-only mode.
 */
4909 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4910 struct qed_ptt *p_ptt,
4913 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4916 if (status != DBG_STATUS_OK)
4918 return qed_protection_override_dump(p_hwfn,
4919 p_ptt, NULL, false, buf_size);
/* Performs a protection-override dump into dump_buf: validates the buffer
 * size, refreshes the block reset state, copies the override window, and
 * reverts GRC params to their defaults.
 */
4922 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
4923 struct qed_ptt *p_ptt,
4925 u32 buf_size_in_dwords,
4926 u32 *num_dumped_dwords)
4928 u32 needed_buf_size_in_dwords;
4929 enum dbg_status status;
4931 status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
4932 &needed_buf_size_in_dwords);
4934 *num_dumped_dwords = 0;
4935 if (status != DBG_STATUS_OK)
4937 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4938 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4940 /* Update reset state */
4941 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4943 status = qed_protection_override_dump(p_hwfn,
4946 true, num_dumped_dwords);
4948 /* Revert GRC params to their default */
4949 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (dwords) for an FW Asserts dump. Reset state
 * must be refreshed first since qed_fw_asserts_dump() skips Storms whose
 * block is in reset.
 */
4954 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4955 struct qed_ptt *p_ptt,
4958 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4961 if (status != DBG_STATUS_OK)
4964 /* Update reset state */
4965 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4966 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
4967 return DBG_STATUS_OK;
/* Performs an FW Asserts dump into dump_buf after validating the buffer
 * size against qed_dbg_fw_asserts_get_dump_buf_size().
 */
4970 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4971 struct qed_ptt *p_ptt,
4973 u32 buf_size_in_dwords,
4974 u32 *num_dumped_dwords)
4976 u32 needed_buf_size_in_dwords;
4977 enum dbg_status status;
4979 status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
4980 &needed_buf_size_in_dwords);
4982 *num_dumped_dwords = 0;
4983 if (status != DBG_STATUS_OK)
4985 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4986 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4988 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
4989 return DBG_STATUS_OK;
4992 /******************************* Data Types **********************************/
/* One MCP trace format descriptor: a packed 32-bit word describing the
 * module, verbosity level, sizes of up to three parameters, and the
 * format-string length; the GET_FIELD/SET_FIELD masks/shifts below
 * decode it.
 */
4994 struct mcp_trace_format {
4996 #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
4997 #define MCP_TRACE_FORMAT_MODULE_SHIFT 0
4998 #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
4999 #define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
5000 #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
5001 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
5002 #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
5003 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
5004 #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
5005 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
5006 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
5007 #define MCP_TRACE_FORMAT_LEN_SHIFT 24
/* Parsed MCP trace meta data image; owns the array of format
 * descriptors referenced when decoding trace entries.
 */
5011 struct mcp_trace_meta {
5015 struct mcp_trace_format *formats;
5018 /* Reg fifo element */
/* One GRC trace FIFO entry. Fields are packed into a single element
 * (shifts reach bit 47, so presumably a 64-bit word — confirm against the
 * struct's data member): accessed address, read/write flag, PF/VF IDs,
 * port, privilege, protection, master and error code.
 */
5019 struct reg_fifo_element {
5021 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
5022 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
5023 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
5024 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
5025 #define REG_FIFO_ELEMENT_PF_SHIFT 24
5026 #define REG_FIFO_ELEMENT_PF_MASK 0xf
5027 #define REG_FIFO_ELEMENT_VF_SHIFT 28
5028 #define REG_FIFO_ELEMENT_VF_MASK 0xff
5029 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
5030 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
5031 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
5032 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
5033 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
5034 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
5035 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
5036 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
5037 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
5038 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
5041 /* IGU fifo element */
5042 struct igu_fifo_element {
5044 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
5045 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
5046 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
5047 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
5048 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
5049 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
5050 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
5051 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
5052 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
5053 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
5056 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
5057 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
5058 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
5059 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
5063 struct igu_fifo_wr_data {
5065 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
5066 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
5067 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
5068 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
5069 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
5070 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
5071 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
5072 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
5073 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
5074 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
5075 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
5076 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
5079 struct igu_fifo_cleanup_wr_data {
5081 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
5082 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
5083 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
5084 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
5085 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
5086 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
5087 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
5088 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
5091 /* Protection override element */
5092 struct protection_override_element {
5094 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
5095 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
5096 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
5097 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
5098 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
5099 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
5100 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
5101 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
5102 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
5103 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
5104 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
5105 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
5108 enum igu_fifo_sources {
5122 enum igu_fifo_addr_types {
5123 IGU_ADDR_TYPE_MSIX_MEM,
5124 IGU_ADDR_TYPE_WRITE_PBA,
5125 IGU_ADDR_TYPE_WRITE_INT_ACK,
5126 IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5127 IGU_ADDR_TYPE_READ_INT,
5128 IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5129 IGU_ADDR_TYPE_RESERVED
5132 struct igu_fifo_addr_data {
5137 enum igu_fifo_addr_types type;
5140 /******************************** Constants **********************************/
/* Upper bound for any single parsed output line; also sizes s_temp_buf. */
5142 #define MAX_MSG_LEN 1024
/* MCP trace module names longer than this are truncated on load. */
5143 #define MCP_TRACE_MAX_MODULE_LEN 8
5144 #define MCP_TRACE_FORMAT_MAX_PARAMS 3
/* Bit width of one param-size field in mcp_trace_format.data (2 bits). */
5145 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5146 (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
/* Reg FIFO addresses are recorded in dword units; multiply to get bytes. */
5147 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4
/* VF field value meaning "this access came from a PF, not a VF". */
5148 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
5149 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
5151 /********************************* Macros ************************************/
5153 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
5155 /***************************** Constant Arrays *******************************/
5157 /* Status string array */
/* Indexed by enum dbg_status (see qed_dbg_get_status_str). NOTE(review):
 * some entries appear elided in this view - order must match the enum.
 */
5158 static const char * const s_status_str[] = {
5159 "Operation completed successfully",
5160 "Debug application version wasn't set",
5161 "Unsupported debug application version",
5162 "The debug block wasn't reset since the last recording",
5163 "Invalid arguments",
5164 "The debug output was already set",
5165 "Invalid PCI buffer size",
5166 "PCI buffer allocation failed",
5167 "A PCI buffer wasn't allocated",
5168 "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
5169 "GRC/Timestamp input overlap in cycle dword 0",
5170 "Cannot record Storm data since the entire recording cycle is used by HW",
5171 "The Storm was already enabled",
5172 "The specified Storm wasn't enabled",
5173 "The block was already enabled",
5174 "The specified block wasn't enabled",
5175 "No input was enabled for recording",
5176 "Filters and triggers are not allowed when recording in 64b units",
5177 "The filter was already enabled",
5178 "The trigger was already enabled",
5179 "The trigger wasn't enabled",
5180 "A constraint can be added only after a filter was enabled or a trigger state was added",
5181 "Cannot add more than 3 trigger states",
5182 "Cannot add more than 4 constraints per filter or trigger state",
5183 "The recording wasn't started",
5184 "A trigger was configured, but it didn't trigger",
5185 "No data was recorded",
5186 "Dump buffer is too small",
5187 "Dumped data is not aligned to chunks",
5189 "Failed allocating virtual memory",
5190 "The input block is in reset",
5191 "Invalid MCP trace signature found in NVRAM",
5192 "Invalid bundle ID found in NVRAM",
5193 "Failed getting NVRAM image",
5194 "NVRAM image is not dword-aligned",
5195 "Failed reading from NVRAM",
5196 "Idle check parsing failed",
5197 "MCP Trace data is corrupt",
5198 "Dump doesn't contain meta data - it must be provided in an image file",
5199 "Failed to halt MCP",
5200 "Failed to resume MCP after halt",
5201 "DMAE transaction failed",
5202 "Failed to empty SEMI sync FIFO",
5203 "IGU FIFO data is corrupt",
5204 "MCP failed to mask parities",
5205 "FW Asserts parsing failed",
5206 "GRC FIFO data is corrupt",
5207 "Protection Override data is corrupt",
5208 "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5209 "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
5212 /* Idle check severity names array */
/* Indexed by dbg_idle_chk_result_hdr.severity. */
5213 static const char * const s_idle_chk_severity_str[] = {
5215 "Error if no traffic",
5219 /* MCP Trace level names array */
/* Indexed by the LEVEL field of mcp_trace_format.data. */
5220 static const char * const s_mcp_trace_level_str[] = {
5226 /* Parsing strings */
/* Lookup tables for decoded reg-FIFO element fields; indexed by the
 * corresponding GET_FIELD value. Entry lists partially elided in this view.
 */
5227 static const char * const s_access_strs[] = {
5232 static const char * const s_privilege_strs[] = {
5239 static const char * const s_protection_strs[] = {
5250 static const char * const s_master_strs[] = {
/* Indexed by bit position of the ERROR field (5 error bits). */
5269 static const char * const s_reg_fifo_error_strs[] = {
5271 "address doesn't belong to any block",
5272 "reserved address in block or write to read-only address",
5273 "privilege/protection mismatch",
5274 "path isolation error"
5277 static const char * const s_igu_fifo_source_strs[] = {
/* Indexed by the IGU element ERR_TYPE field. NOTE(review): contains a typo
 * "attnetion" - runtime string, left as-is here.
 */
5291 static const char * const s_igu_fifo_error_strs[] = {
5294 "function disabled",
5295 "VF sent command to attnetion address",
5296 "host sent prod update command",
5297 "read of during interrupt register while in MIMD mode",
5298 "access to PXP BAR reserved address",
5299 "producer update command to attention index",
5301 "SB index not valid",
5302 "SB relative index and FID not found",
5304 "command with error flag asserted (PCI error or CAU discard)",
5305 "VF sent cleanup and RF cleanup is disabled",
5306 "cleanup command on type bigger than 4"
5309 /* IGU FIFO address data */
/* IGU command-address ranges ([start, end] inclusive) with descriptions. */
5310 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
5311 {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
5312 {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5313 {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
5314 {0x201, 0x201, "Write PBA[64:127]", "reserved",
5315 IGU_ADDR_TYPE_WRITE_PBA},
5316 {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
5317 {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5318 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
5319 IGU_ADDR_TYPE_WRITE_INT_ACK},
5320 {0x5f0, 0x5f0, "Attention bits update", NULL,
5321 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5322 {0x5f1, 0x5f1, "Attention bits set", NULL,
5323 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5324 {0x5f2, 0x5f2, "Attention bits clear", NULL,
5325 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5326 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
5327 IGU_ADDR_TYPE_READ_INT},
5328 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
5329 IGU_ADDR_TYPE_READ_INT},
5330 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
5331 IGU_ADDR_TYPE_READ_INT},
5332 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
5333 IGU_ADDR_TYPE_READ_INT},
5334 {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5335 {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
5338 /******************************** Variables **********************************/
5340 /* MCP Trace meta data - used in case the dump doesn't contain the meta data
5341 * (e.g. due to no NVRAM access). Set externally; consulted by
5341 * qed_parse_mcp_trace_dump when the dump's meta section is empty.
5343 static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
5345 /* Temporary buffer, used for print size calculations */
/* qed_get_buf_ptr() redirects all sprintf output here when the caller passes
 * a NULL results buffer (size-measurement pass). Not thread-safe.
 */
5346 static char s_temp_buf[MAX_MSG_LEN];
5348 /***************************** Public Functions *******************************/
/* Registers the firmware binary debug data. Walks the bin_buffer_hdr table at
 * the start of bin_ptr and records, per buffer type, a pointer and dword size
 * into the global s_dbg_arrays. Always returns DBG_STATUS_OK.
 * NOTE(review): loop-variable declaration and brace lines are elided in this
 * view of the function.
 */
5350 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
5352 /* Convert binary data to debug arrays */
5353 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5356 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5357 s_dbg_arrays[buf_id].ptr =
5358 (u32 *)(bin_ptr + buf_array[buf_id].offset);
5359 s_dbg_arrays[buf_id].size_in_dwords =
5360 BYTES_TO_DWORDS(buf_array[buf_id].length);
5363 return DBG_STATUS_OK;
5366 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
5368 return (a + b) % size;
5371 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
5373 return (size + a - b) % size;
5376 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
5377 * bytes) and returns them as a dword value. The specified buffer offset is
5377 * updated (advanced cyclically, one byte at a time, via qed_cyclic_add).
/* NOTE(review): the 'u32 *offset' parameter line and the declarations of
 * val/val_ptr/i are elided in this view; the visible body assembles the
 * result dword in native byte order by writing into val via val_ptr.
 */
5380 static u32 qed_read_from_cyclic_buf(void *buf,
5382 u32 buf_size, u8 num_bytes_to_read)
5384 u8 *bytes_buf = (u8 *)buf;
5389 val_ptr = (u8 *)&val;
5391 for (i = 0; i < num_bytes_to_read; i++) {
5392 val_ptr[i] = bytes_buf[*offset];
5393 *offset = qed_cyclic_add(*offset, 1, buf_size);
5399 /* Reads and returns the next byte from the specified buffer.
5400 * The specified buffer offset is updated.
5402 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5404 return ((u8 *)buf)[(*offset)++];
5407 /* Reads and returns the next dword from the specified buffer.
5408 * The specified buffer offset is updated.
/* NOTE(review): the offset-advance and return statements are elided in this
 * view. The dword is read via an unaligned-capable byte-pointer cast;
 * presumably offset is dword-aligned by the callers - confirm.
 */
5410 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
5412 u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
5418 /* Reads the next string from the specified buffer, and copies it to the
5419 * specified pointer. The specified buffer offset is updated.
/* NOTE(review): strncpy does not guarantee NUL termination, so the explicit
 * dest[size - 1] = '\0' below is what makes the copy safe. The offset-advance
 * statement is elided in this view.
 */
5421 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
5423 const char *source_str = &((const char *)buf)[*offset];
5425 strncpy(dest, source_str, size);
5426 dest[size - 1] = '\0';
5430 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
5431 * If the specified buffer in NULL, a temporary buffer pointer is returned.
5433 static char *qed_get_buf_ptr(void *buf, u32 offset)
5435 return buf ? (char *)buf + offset : s_temp_buf;
5438 /* Reads a param from the specified buffer. Returns the number of dwords read.
5439 * If the returned str_param is NULL, the param is numeric and its value is
5440 * returned in num_param.
5441 * Otherwise, the param is a string and its pointer is returned in str_param.
/* NOTE(review): interior lines are elided in this view - in particular the
 * "if (offset & 0x3)" guards that should precede each 4-byte alignment
 * adjustment, and the trailing offset advance / "return offset / 4". The
 * returned pointers alias directly into dump_buf (no copies are made).
 */
5443 static u32 qed_read_param(u32 *dump_buf,
5444 const char **param_name,
5445 const char **param_str_val, u32 *param_num_val)
5447 char *char_buf = (char *)dump_buf;
5448 u32 offset = 0; /* In bytes */
5450 /* Extract param name */
5451 *param_name = char_buf;
5452 offset += strlen(*param_name) + 1;
5454 /* Check param type: a nonzero type byte means string param */
5455 if (*(char_buf + offset++)) {
5457 *param_str_val = char_buf + offset;
5458 offset += strlen(*param_str_val) + 1;
5460 offset += (4 - (offset & 0x3));
5463 *param_str_val = NULL;
5465 offset += (4 - (offset & 0x3));
5466 *param_num_val = *(u32 *)(char_buf + offset);
5473 /* Reads a section header from the specified buffer.
5474 * Returns the number of dwords read.
5476 static u32 qed_read_section_hdr(u32 *dump_buf,
5477 const char **section_name,
5478 u32 *num_section_params)
5480 const char *param_str_val;
5482 return qed_read_param(dump_buf,
5483 section_name, ¶m_str_val, num_section_params);
5486 /* Reads section params from the specified buffer and prints them to the results
5487 * buffer. Returns the number of dwords read. The character count is returned
5487 * through num_chars_printed.
/* NOTE(review): interior lines elided in this view (the results_offset +=
 * accumulation lines and part of the if/else printing logic). Line 5502 below
 * contains mojibake: "&para;" HTML entities where "&param_..." address-of
 * arguments should be - preserved byte-identically here, needs a source fix.
 * Numeric params named "fw-timestamp" appear to be skipped from printing.
 */
5489 static u32 qed_print_section_params(u32 *dump_buf,
5490 u32 num_section_params,
5491 char *results_buf, u32 *num_chars_printed)
5493 u32 i, dump_offset = 0, results_offset = 0;
5495 for (i = 0; i < num_section_params; i++) {
5496 const char *param_name;
5497 const char *param_str_val;
5498 u32 param_num_val = 0;
5500 dump_offset += qed_read_param(dump_buf + dump_offset,
5502 ¶m_str_val, ¶m_num_val);
5506 sprintf(qed_get_buf_ptr(results_buf,
5508 "%s: %s\n", param_name, param_str_val);
5509 else if (strcmp(param_name, "fw-timestamp"))
5512 sprintf(qed_get_buf_ptr(results_buf,
5514 "%s: %d\n", param_name, param_num_val);
5518 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
5519 *num_chars_printed = results_offset;
/* Maps a dbg_status code to its human-readable string from s_status_str;
 * out-of-range codes yield "Invalid debug status". NOTE(review): the
 * "return (status <" line is elided in this view.
 */
5523 const char *qed_dbg_get_status_str(enum dbg_status status)
5526 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
5529 /* Parses the idle check rules and returns the number of characters printed.
5530 * In case of parsing error, returns 0.
/* NOTE(review): many interior lines of this function are elided in this view
 * (declarations of i/j/is_mem/reg_id/curr_reg_id, several results_offset +=
 * accumulations, loop headers and closing braces). Comments below describe
 * only what the visible lines show. Per rule: looks up parsing metadata by
 * hdr->rule_id in s_dbg_arrays, prints severity + message + dumped register
 * values into results_buf (or s_temp_buf when results_buf is NULL), and
 * advances dump_buf past the rule's header and register data.
 */
5532 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
5536 bool print_fw_idle_chk,
5538 u32 *num_errors, u32 *num_warnings)
5540 u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
5546 /* Go over dumped results */
5547 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
5549 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
5550 struct dbg_idle_chk_result_hdr *hdr;
5551 const char *parsing_str;
5552 u32 parsing_str_offset;
5553 const char *lsi_msg;
5557 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
5559 (const struct dbg_idle_chk_rule_parsing_data *)
5560 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
5562 parsing_str_offset =
5563 GET_FIELD(rule_parsing_data->data,
5564 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
5566 GET_FIELD(rule_parsing_data->data,
5567 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
5568 parsing_str = &((const char *)
5569 s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
5570 [parsing_str_offset];
5571 lsi_msg = parsing_str;
/* Bail out (return 0) on a corrupt severity value */
5573 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
5576 /* Skip rule header */
5577 dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
5579 /* Update errors/warnings count */
5580 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
5581 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
5586 /* Print rule severity */
5588 sprintf(qed_get_buf_ptr(results_buf,
5589 results_offset), "%s: ",
5590 s_idle_chk_severity_str[hdr->severity]);
5592 /* Print rule message: FW message precedes the LSI message when present */
5594 parsing_str += strlen(parsing_str) + 1;
5596 sprintf(qed_get_buf_ptr(results_buf,
5597 results_offset), "%s.",
5599 print_fw_idle_chk ? parsing_str : lsi_msg);
5600 parsing_str += strlen(parsing_str) + 1;
5602 /* Print register values */
5604 sprintf(qed_get_buf_ptr(results_buf,
5605 results_offset), " Registers:");
5607 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
5609 struct dbg_idle_chk_result_reg_hdr *reg_hdr
5610 = (struct dbg_idle_chk_result_reg_hdr *)
5613 GET_FIELD(reg_hdr->data,
5614 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
5616 GET_FIELD(reg_hdr->data,
5617 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
5619 /* Skip reg header */
5621 (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
5623 /* Skip register names until the required reg_id is
5626 for (; reg_id > curr_reg_id;
5628 parsing_str += strlen(parsing_str) + 1);
5631 sprintf(qed_get_buf_ptr(results_buf,
5632 results_offset), " %s",
/* Memory entries get an "[index]" suffix */
5634 if (i < hdr->num_dumped_cond_regs && is_mem)
5636 sprintf(qed_get_buf_ptr(results_buf,
5638 "[%d]", hdr->mem_entry_id +
5639 reg_hdr->start_entry);
5641 sprintf(qed_get_buf_ptr(results_buf,
5642 results_offset), "=");
5643 for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
5645 sprintf(qed_get_buf_ptr(results_buf,
5648 if (j < reg_hdr->size - 1)
5650 sprintf(qed_get_buf_ptr
5652 results_offset), ",");
5657 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
5660 /* Check if end of dump buffer was exceeded */
5661 if (dump_buf > dump_buf_end)
5663 return results_offset;
5666 /* Parses an idle check dump buffer.
5667 * If result_buf is not NULL, the idle check results are printed to it.
5668 * In any case, the required results buffer size is assigned to
5669 * parsed_results_bytes.
5670 * The parsing status is returned.
/* NOTE(review): interior lines elided in this view (some parameter lines,
 * num_errors/num_warnings initialization, the "if (num_rules)" wrapper and
 * several results_offset accumulations). Lines 5694/5705 contain mojibake:
 * "&sect;" entity where "&section_name" should be - preserved byte-identically.
 * Structure: validates the "global_params" and "idle_chk" sections, reads
 * num_rules, then runs the rule parser twice (FW pass, then LSI pass) and
 * appends a summary line with the error/warning counts.
 */
5672 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
5674 u32 num_dumped_dwords,
5676 u32 *parsed_results_bytes,
5680 const char *section_name, *param_name, *param_str_val;
5681 u32 *dump_buf_end = dump_buf + num_dumped_dwords;
5682 u32 num_section_params = 0, num_rules;
5683 u32 results_offset = 0; /* Offset in results_buf in bytes */
5685 *parsed_results_bytes = 0;
/* Parsing metadata must have been registered via qed_dbg_user_set_bin_ptr */
5688 if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
5689 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
5690 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5692 /* Read global_params section */
5693 dump_buf += qed_read_section_hdr(dump_buf,
5694 §ion_name, &num_section_params);
5695 if (strcmp(section_name, "global_params"))
5696 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5698 /* Print global params */
5699 dump_buf += qed_print_section_params(dump_buf,
5701 results_buf, &results_offset);
5703 /* Read idle_chk section */
5704 dump_buf += qed_read_section_hdr(dump_buf,
5705 §ion_name, &num_section_params);
5706 if (strcmp(section_name, "idle_chk") || num_section_params != 1)
5707 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5709 dump_buf += qed_read_param(dump_buf,
5710 ¶m_name, ¶m_str_val, &num_rules);
5711 if (strcmp(param_name, "num_rules") != 0)
5712 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5715 u32 rules_print_size;
5717 /* Print FW output */
5719 sprintf(qed_get_buf_ptr(results_buf,
5721 "FW_IDLE_CHECK:\n");
5723 qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
5724 dump_buf_end, num_rules,
5728 results_offset : NULL,
5729 num_errors, num_warnings);
5730 results_offset += rules_print_size;
/* A zero print size signals a rule-parsing failure */
5731 if (rules_print_size == 0)
5732 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5734 /* Print LSI output */
5736 sprintf(qed_get_buf_ptr(results_buf,
5738 "\nLSI_IDLE_CHECK:\n");
5740 qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
5741 dump_buf_end, num_rules,
5745 results_offset : NULL,
5746 num_errors, num_warnings);
5747 results_offset += rules_print_size;
5748 if (rules_print_size == 0)
5749 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5752 /* Print errors/warnings count */
5755 sprintf(qed_get_buf_ptr(results_buf,
5757 "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
5758 *num_errors, *num_warnings);
5759 } else if (*num_warnings) {
5761 sprintf(qed_get_buf_ptr(results_buf,
5763 "\nIdle Check completed successfuly (with %d warnings)\n",
5767 sprintf(qed_get_buf_ptr(results_buf,
5769 "\nIdle Check completed successfuly\n");
5772 /* Add 1 for string NULL termination */
5773 *parsed_results_bytes = results_offset + 1;
5774 return DBG_STATUS_OK;
/* Computes the buffer size needed for parsed idle-check results by running
 * the parser in size-only mode (NULL results buffer). The local error/warning
 * counters are discarded. NOTE(review): some argument lines of the call are
 * elided in this view.
 */
5777 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
5779 u32 num_dumped_dwords,
5780 u32 *results_buf_size)
5782 u32 num_errors, num_warnings;
5784 return qed_parse_idle_chk_dump(p_hwfn,
5789 &num_errors, &num_warnings);
/* Prints parsed idle-check results into results_buf and reports the error and
 * warning counts to the caller; the required size is computed into a local
 * and discarded. NOTE(review): some argument lines of the call are elided in
 * this view.
 */
5792 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
5794 u32 num_dumped_dwords,
5796 u32 *num_errors, u32 *num_warnings)
5798 u32 parsed_buf_size;
5800 return qed_parse_idle_chk_dump(p_hwfn,
5805 num_errors, num_warnings);
5808 /* Frees the specified MCP Trace meta data: each module-name string, the
5809 * module-pointer array, each format string, and the format array. Relies on
5809 * modules_num/formats_num reflecting how many entries were allocated.
/* NOTE(review): the loop-index declaration and closing braces are elided in
 * this view. kfree(NULL) is a no-op, so partially-allocated arrays are safe.
 */
5809 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
5810 struct mcp_trace_meta *meta)
5814 /* Release modules */
5815 if (meta->modules) {
5816 for (i = 0; i < meta->modules_num; i++)
5817 kfree(meta->modules[i]);
5818 kfree(meta->modules);
5821 /* Release formats */
5822 if (meta->formats) {
5823 for (i = 0; i < meta->formats_num; i++)
5824 kfree(meta->formats[i].format_str);
5825 kfree(meta->formats);
5829 /* Allocates and fills MCP Trace meta data based on the specified meta data
5831 * buffer (signature + module-name table + signature + format table).
5831 * Returns debug status code. On failure the caller is expected to release
5831 * partial allocations via qed_mcp_trace_free_meta.
/* NOTE(review): several interior lines are elided in this view (null checks
 * after kzalloc, the format_len declaration, closing braces).
 */
5833 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
5834 const u32 *meta_buf,
5835 struct mcp_trace_meta *meta)
5837 u8 *meta_buf_bytes = (u8 *)meta_buf;
5838 u32 offset = 0, signature, i;
5840 memset(meta, 0, sizeof(*meta));
5842 /* Read first signature */
5843 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5844 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
5845 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
5847 /* Read number of modules and allocate memory for all the modules
5850 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
5851 meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
5853 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5855 /* Allocate and read all module strings */
5856 for (i = 0; i < meta->modules_num; i++) {
5857 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
5859 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
5860 if (!(*(meta->modules + i))) {
5861 /* Update number of modules to be released */
/* NOTE(review): entries 0..i-1 were allocated here, yet modules_num is set
 * to i - 1, which appears to leak one entry in the free path - confirm
 * against qed_mcp_trace_free_meta.
 */
5862 meta->modules_num = i ? i - 1 : 0;
5863 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5866 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
5867 *(meta->modules + i));
/* Truncate over-long module names for fixed-width printing */
5868 if (module_len > MCP_TRACE_MAX_MODULE_LEN)
5869 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
5872 /* Read second signature */
5873 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5874 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
5875 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
5877 /* Read number of formats and allocate memory for all formats */
5878 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5879 meta->formats = kzalloc(meta->formats_num *
5880 sizeof(struct mcp_trace_format),
5883 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5885 /* Allocate and read all strings */
5886 for (i = 0; i < meta->formats_num; i++) {
5887 struct mcp_trace_format *format_ptr = &meta->formats[i];
5890 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
5894 MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
5895 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
5896 if (!format_ptr->format_str) {
5897 /* Update number of formats to be released */
5898 meta->formats_num = i ? i - 1 : 0;
5899 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5902 qed_read_str_from_buf(meta_buf_bytes,
5904 format_len, format_ptr->format_str);
5907 return DBG_STATUS_OK;
5910 /* Parses an MCP Trace dump buffer.
5911 * If result_buf is not NULL, the MCP Trace results are printed to it.
5912 * In any case, the required results buffer size is assigned to
5913 * parsed_results_bytes.
5914 * The parsing status is returned.
/* NOTE(review): interior lines elided in this view (trace_buf declaration,
 * format_size/param_size declarations, goto labels for the cleanup path,
 * several results_offset accumulations). "&param"/"&section" arguments appear
 * as mojibake entities on some lines - preserved byte-identically.
 * Flow: validate global_params + mcp_trace_data sections, locate the cyclic
 * trace buffer, pick the meta data (from the dump or the global
 * s_mcp_trace_meta fallback), then decode entries one by one: header ->
 * format index -> up to 3 params -> formatted line per format string.
 */
5916 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5918 u32 num_dumped_dwords,
5920 u32 *parsed_results_bytes)
5922 u32 results_offset = 0, param_mask, param_shift, param_num_val;
5923 u32 num_section_params, offset, end_offset, bytes_left;
5924 const char *section_name, *param_name, *param_str_val;
5925 u32 trace_data_dwords, trace_meta_dwords;
5926 struct mcp_trace_meta meta;
5927 struct mcp_trace *trace;
5928 enum dbg_status status;
5929 const u32 *meta_buf;
5932 *parsed_results_bytes = 0;
5934 /* Read global_params section */
5935 dump_buf += qed_read_section_hdr(dump_buf,
5936 §ion_name, &num_section_params);
5937 if (strcmp(section_name, "global_params"))
5938 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5940 /* Print global params */
5941 dump_buf += qed_print_section_params(dump_buf,
5943 results_buf, &results_offset);
5945 /* Read trace_data section */
5946 dump_buf += qed_read_section_hdr(dump_buf,
5947 §ion_name, &num_section_params);
5948 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
5949 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5950 dump_buf += qed_read_param(dump_buf,
5951 ¶m_name, ¶m_str_val, ¶m_num_val);
5952 if (strcmp(param_name, "size"))
5953 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5954 trace_data_dwords = param_num_val;
5956 /* Prepare trace info: the cyclic byte buffer follows the header */
5957 trace = (struct mcp_trace *)dump_buf;
5958 trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
5959 offset = trace->trace_oldest;
5960 end_offset = trace->trace_prod;
5961 bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
5962 dump_buf += trace_data_dwords;
5964 /* Read meta_data section */
5965 dump_buf += qed_read_section_hdr(dump_buf,
5966 §ion_name, &num_section_params);
5967 if (strcmp(section_name, "mcp_trace_meta"))
5968 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5969 dump_buf += qed_read_param(dump_buf,
5970 ¶m_name, ¶m_str_val, ¶m_num_val);
5971 if (strcmp(param_name, "size") != 0)
5972 return DBG_STATUS_MCP_TRACE_BAD_DATA;
5973 trace_meta_dwords = param_num_val;
5975 /* Choose meta data buffer */
5976 if (!trace_meta_dwords) {
5977 /* Dump doesn't include meta data */
5978 if (!s_mcp_trace_meta.ptr)
5979 return DBG_STATUS_MCP_TRACE_NO_META;
5980 meta_buf = s_mcp_trace_meta.ptr;
5982 /* Dump includes meta data */
5983 meta_buf = dump_buf;
5986 /* Allocate meta data memory */
5987 status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
5988 if (status != DBG_STATUS_OK)
5991 /* Ignore the level and modules masks - just print everything that is
5992 * already in the buffer.
5994 while (bytes_left) {
5995 struct mcp_trace_format *format_ptr;
5996 u8 format_level, format_module;
5997 u32 params[3] = { 0, 0, 0 };
5998 u32 header, format_idx, i;
6000 if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
6001 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6005 header = qed_read_from_cyclic_buf(trace_buf,
6008 MFW_TRACE_ENTRY_SIZE);
6009 bytes_left -= MFW_TRACE_ENTRY_SIZE;
6010 format_idx = header & MFW_TRACE_EVENTID_MASK;
6012 /* Skip message if its index doesn't exist in the meta data */
6013 if (format_idx > meta.formats_num) {
6016 MFW_TRACE_PRM_SIZE_MASK) >>
6017 MFW_TRACE_PRM_SIZE_SHIFT);
6019 if (bytes_left < format_size) {
6020 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6024 offset = qed_cyclic_add(offset,
6025 format_size, trace->size);
6026 bytes_left -= format_size;
6030 format_ptr = &meta.formats[format_idx];
/* Walk the three packed 2-bit param-size fields */
6032 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6033 MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6034 i < MCP_TRACE_FORMAT_MAX_PARAMS;
6035 i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6036 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6037 /* Extract param size (0..3) */
6039 (u8)((format_ptr->data &
6040 param_mask) >> param_shift);
6042 /* If the param size is zero, there are no other
6048 /* Size is encoded using 2 bits, where 3 is used to
6051 if (param_size == 3)
6053 if (bytes_left < param_size) {
6054 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6058 params[i] = qed_read_from_cyclic_buf(trace_buf,
6062 bytes_left -= param_size;
6066 (u8)((format_ptr->data &
6067 MCP_TRACE_FORMAT_LEVEL_MASK) >>
6068 MCP_TRACE_FORMAT_LEVEL_SHIFT);
6070 (u8)((format_ptr->data &
6071 MCP_TRACE_FORMAT_MODULE_MASK) >>
6072 MCP_TRACE_FORMAT_MODULE_SHIFT);
6073 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
6074 status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6078 /* Print current message to results buffer */
6080 sprintf(qed_get_buf_ptr(results_buf,
6081 results_offset), "%s %-8s: ",
6082 s_mcp_trace_level_str[format_level],
6083 meta.modules[format_module]);
6085 sprintf(qed_get_buf_ptr(results_buf,
6087 format_ptr->format_str, params[0], params[1],
/* Meta data is freed on both success and error paths */
6092 *parsed_results_bytes = results_offset + 1;
6093 qed_mcp_trace_free_meta(p_hwfn, &meta);
/* Computes the buffer size needed for parsed MCP trace results by running the
 * parser with a NULL results buffer. NOTE(review): some argument lines of the
 * call are elided in this view.
 */
6097 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
6099 u32 num_dumped_dwords,
6100 u32 *results_buf_size)
6102 return qed_parse_mcp_trace_dump(p_hwfn,
6105 NULL, results_buf_size);
/* Prints parsed MCP trace results into results_buf; the computed size is
 * discarded. NOTE(review): some argument lines of the call are elided in this
 * view.
 */
6108 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
6110 u32 num_dumped_dwords,
6113 u32 parsed_buf_size;
6115 return qed_parse_mcp_trace_dump(p_hwfn,
6118 results_buf, &parsed_buf_size);
6121 /* Parses a Reg FIFO dump buffer.
6122 * If result_buf is not NULL, the Reg FIFO results are printed to it.
6123 * In any case, the required results buffer size is assigned to
6124 * parsed_results_bytes.
6125 * The parsing status is returned.
/* NOTE(review): interior lines elided in this view (vf_str declaration, the
 * errors-decoding loop header, several results_offset accumulations, closing
 * braces). Some "&param"/"&section" arguments appear as mojibake entities -
 * preserved byte-identically. Flow: validate global_params + reg_fifo_data
 * sections, then decode each 64-bit element into a human-readable line using
 * the s_access/privilege/protection/master string tables, followed by the
 * decoded error bits.
 */
6127 static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
6129 u32 num_dumped_dwords,
6131 u32 *parsed_results_bytes)
6133 u32 results_offset = 0, param_num_val, num_section_params, num_elements;
6134 const char *section_name, *param_name, *param_str_val;
6135 struct reg_fifo_element *elements;
6136 u8 i, j, err_val, vf_val;
6139 /* Read global_params section */
6140 dump_buf += qed_read_section_hdr(dump_buf,
6141 §ion_name, &num_section_params);
6142 if (strcmp(section_name, "global_params"))
6143 return DBG_STATUS_REG_FIFO_BAD_DATA;
6145 /* Print global params */
6146 dump_buf += qed_print_section_params(dump_buf,
6148 results_buf, &results_offset);
6150 /* Read reg_fifo_data section */
6151 dump_buf += qed_read_section_hdr(dump_buf,
6152 §ion_name, &num_section_params);
6153 if (strcmp(section_name, "reg_fifo_data"))
6154 return DBG_STATUS_REG_FIFO_BAD_DATA;
6155 dump_buf += qed_read_param(dump_buf,
6156 ¶m_name, ¶m_str_val, ¶m_num_val);
6157 if (strcmp(param_name, "size"))
6158 return DBG_STATUS_REG_FIFO_BAD_DATA;
/* Element data must be a whole number of elements */
6159 if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6160 return DBG_STATUS_REG_FIFO_BAD_DATA;
6161 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6162 elements = (struct reg_fifo_element *)dump_buf;
6164 /* Decode elements */
6165 for (i = 0; i < num_elements; i++) {
6166 bool err_printed = false;
6168 /* Discover if element belongs to a VF or a PF */
6169 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6170 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6171 sprintf(vf_str, "%s", "N/A");
6173 sprintf(vf_str, "%d", vf_val);
6175 /* Add parsed element to parsed buffer */
6177 sprintf(qed_get_buf_ptr(results_buf,
6179 "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6181 (u32)GET_FIELD(elements[i].data,
6182 REG_FIFO_ELEMENT_ADDRESS) *
6183 REG_FIFO_ELEMENT_ADDR_FACTOR,
6184 s_access_strs[GET_FIELD(elements[i].data,
6185 REG_FIFO_ELEMENT_ACCESS)],
6186 (u32)GET_FIELD(elements[i].data,
6187 REG_FIFO_ELEMENT_PF), vf_str,
6188 (u32)GET_FIELD(elements[i].data,
6189 REG_FIFO_ELEMENT_PORT),
6190 s_privilege_strs[GET_FIELD(elements[i].
6192 REG_FIFO_ELEMENT_PRIVILEGE)],
6193 s_protection_strs[GET_FIELD(elements[i].data,
6194 REG_FIFO_ELEMENT_PROTECTION)],
6195 s_master_strs[GET_FIELD(elements[i].data,
6196 REG_FIFO_ELEMENT_MASTER)]);
/* Walk the 5 error bits, printing a comma-separated error list */
6200 err_val = GET_FIELD(elements[i].data,
6201 REG_FIFO_ELEMENT_ERROR);
6202 j < ARRAY_SIZE(s_reg_fifo_error_strs);
6203 j++, err_val >>= 1) {
6204 if (!(err_val & 0x1))
6208 sprintf(qed_get_buf_ptr(results_buf,
6212 sprintf(qed_get_buf_ptr(results_buf,
6213 results_offset), "%s",
6214 s_reg_fifo_error_strs[j]);
6219 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6222 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6224 "fifo contained %d elements", num_elements);
6226 /* Add 1 for string NULL termination */
6227 *parsed_results_bytes = results_offset + 1;
6228 return DBG_STATUS_OK;
/* Computes the buffer size needed for parsed reg-FIFO results by running the
 * parser with a NULL results buffer. NOTE(review): some argument lines of the
 * call are elided in this view.
 */
6231 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
6233 u32 num_dumped_dwords,
6234 u32 *results_buf_size)
6236 return qed_parse_reg_fifo_dump(p_hwfn,
6239 NULL, results_buf_size);
6242 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
6244 u32 num_dumped_dwords,
6247 u32 parsed_buf_size;
6249 return qed_parse_reg_fifo_dump(p_hwfn,
6252 results_buf, &parsed_buf_size);
6255 /* Parses an IGU FIFO dump buffer.
6256 * If result_buf is not NULL, the IGU FIFO results are printed to it.
6257 * In any case, the required results buffer size is assigned to
6258 * parsed_results_bytes.
6259 * The parsing status is returned.
6261 static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
6263 u32 num_dumped_dwords,
6265 u32 *parsed_results_bytes)
6267 u32 results_offset = 0, param_num_val, num_section_params, num_elements;
6268 const char *section_name, *param_name, *param_str_val;
6269 struct igu_fifo_element *elements;
6270 char parsed_addr_data[32];
6271 char parsed_wr_data[256];
6274 /* Read global_params section */
6275 dump_buf += qed_read_section_hdr(dump_buf,
6276 §ion_name, &num_section_params);
6277 if (strcmp(section_name, "global_params"))
6278 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6280 /* Print global params */
6281 dump_buf += qed_print_section_params(dump_buf,
6283 results_buf, &results_offset);
6285 /* Read igu_fifo_data section */
6286 dump_buf += qed_read_section_hdr(dump_buf,
6287 §ion_name, &num_section_params);
6288 if (strcmp(section_name, "igu_fifo_data"))
6289 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6290 dump_buf += qed_read_param(dump_buf,
6291 ¶m_name, ¶m_str_val, ¶m_num_val);
6292 if (strcmp(param_name, "size"))
6293 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6294 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
6295 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6296 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
6297 elements = (struct igu_fifo_element *)dump_buf;
6299 /* Decode elements */
6300 for (i = 0; i < num_elements; i++) {
6301 /* dword12 (dword index 1 and 2) contains bits 32..95 of the
6305 ((u64)elements[i].dword2 << 32) | elements[i].dword1;
6306 bool is_wr_cmd = GET_FIELD(dword12,
6307 IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6308 bool is_pf = GET_FIELD(elements[i].dword0,
6309 IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6310 u16 cmd_addr = GET_FIELD(elements[i].dword0,
6311 IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6312 u8 source = GET_FIELD(elements[i].dword0,
6313 IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6314 u8 err_type = GET_FIELD(elements[i].dword0,
6315 IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6316 const struct igu_fifo_addr_data *addr_data = NULL;
6318 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6319 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6320 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6321 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6323 /* Find address data */
6324 for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
6326 if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
6327 cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
6328 addr_data = &s_igu_fifo_addr_data[j];
6330 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6332 /* Prepare parsed address data */
6333 switch (addr_data->type) {
6334 case IGU_ADDR_TYPE_MSIX_MEM:
6335 sprintf(parsed_addr_data,
6336 " vector_num=0x%x", cmd_addr / 2);
6338 case IGU_ADDR_TYPE_WRITE_INT_ACK:
6339 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6340 sprintf(parsed_addr_data,
6341 " SB=0x%x", cmd_addr - addr_data->start_addr);
6344 parsed_addr_data[0] = '\0';
6347 /* Prepare parsed write data */
6349 u32 wr_data = GET_FIELD(dword12,
6350 IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6351 u32 prod_cons = GET_FIELD(wr_data,
6352 IGU_FIFO_WR_DATA_PROD_CONS);
6353 u8 is_cleanup = GET_FIELD(wr_data,
6354 IGU_FIFO_WR_DATA_CMD_TYPE);
6356 if (source == IGU_SRC_ATTN) {
6357 sprintf(parsed_wr_data,
6358 "prod: 0x%x, ", prod_cons);
6361 u8 cleanup_val = GET_FIELD(wr_data,
6362 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6363 u8 cleanup_type = GET_FIELD(wr_data,
6364 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6366 sprintf(parsed_wr_data,
6367 "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
6368 cleanup_val ? "set" : "clear",
6371 u8 update_flag = GET_FIELD(wr_data,
6372 IGU_FIFO_WR_DATA_UPDATE_FLAG);
6373 u8 en_dis_int_for_sb =
6375 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6376 u8 segment = GET_FIELD(wr_data,
6377 IGU_FIFO_WR_DATA_SEGMENT);
6378 u8 timer_mask = GET_FIELD(wr_data,
6379 IGU_FIFO_WR_DATA_TIMER_MASK);
6381 sprintf(parsed_wr_data,
6382 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
6384 update_flag ? "update" : "nop",
6386 ? (en_dis_int_for_sb ==
6387 1 ? "disable" : "nop") :
6389 segment ? "attn" : "regular",
6394 parsed_wr_data[0] = '\0';
6397 /* Add parsed element to parsed buffer */
6399 sprintf(qed_get_buf_ptr(results_buf,
6401 "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
6402 elements[i].dword2, elements[i].dword1,
6404 is_pf ? "pf" : "vf",
6405 GET_FIELD(elements[i].dword0,
6406 IGU_FIFO_ELEMENT_DWORD0_FID),
6407 s_igu_fifo_source_strs[source],
6408 is_wr_cmd ? "wr" : "rd", cmd_addr,
6409 (!is_pf && addr_data->vf_desc)
6410 ? addr_data->vf_desc : addr_data->desc,
6411 parsed_addr_data, parsed_wr_data,
6412 s_igu_fifo_error_strs[err_type]);
6415 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6417 "fifo contained %d elements", num_elements);
6419 /* Add 1 for string NULL termination */
6420 *parsed_results_bytes = results_offset + 1;
6421 return DBG_STATUS_OK;
6424 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
6426 u32 num_dumped_dwords,
6427 u32 *results_buf_size)
6429 return qed_parse_igu_fifo_dump(p_hwfn,
6432 NULL, results_buf_size);
6435 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
6437 u32 num_dumped_dwords,
6440 u32 parsed_buf_size;
6442 return qed_parse_igu_fifo_dump(p_hwfn,
6445 results_buf, &parsed_buf_size);
6448 static enum dbg_status
6449 qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
6451 u32 num_dumped_dwords,
6453 u32 *parsed_results_bytes)
6455 u32 results_offset = 0, param_num_val, num_section_params, num_elements;
6456 const char *section_name, *param_name, *param_str_val;
6457 struct protection_override_element *elements;
6460 /* Read global_params section */
6461 dump_buf += qed_read_section_hdr(dump_buf,
6462 §ion_name, &num_section_params);
6463 if (strcmp(section_name, "global_params"))
6464 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6466 /* Print global params */
6467 dump_buf += qed_print_section_params(dump_buf,
6469 results_buf, &results_offset);
6471 /* Read protection_override_data section */
6472 dump_buf += qed_read_section_hdr(dump_buf,
6473 §ion_name, &num_section_params);
6474 if (strcmp(section_name, "protection_override_data"))
6475 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6476 dump_buf += qed_read_param(dump_buf,
6477 ¶m_name, ¶m_str_val, ¶m_num_val);
6478 if (strcmp(param_name, "size"))
6479 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6480 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
6481 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6482 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
6483 elements = (struct protection_override_element *)dump_buf;
6485 /* Decode elements */
6486 for (i = 0; i < num_elements; i++) {
6487 u32 address = GET_FIELD(elements[i].data,
6488 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
6489 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
6492 sprintf(qed_get_buf_ptr(results_buf,
6494 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
6496 (u32)GET_FIELD(elements[i].data,
6497 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6498 (u32)GET_FIELD(elements[i].data,
6499 PROTECTION_OVERRIDE_ELEMENT_READ),
6500 (u32)GET_FIELD(elements[i].data,
6501 PROTECTION_OVERRIDE_ELEMENT_WRITE),
6502 s_protection_strs[GET_FIELD(elements[i].data,
6503 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
6504 s_protection_strs[GET_FIELD(elements[i].data,
6505 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
6508 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6510 "protection override contained %d elements",
6513 /* Add 1 for string NULL termination */
6514 *parsed_results_bytes = results_offset + 1;
6515 return DBG_STATUS_OK;
6519 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
6521 u32 num_dumped_dwords,
6522 u32 *results_buf_size)
6524 return qed_parse_protection_override_dump(p_hwfn,
6527 NULL, results_buf_size);
6530 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
6532 u32 num_dumped_dwords,
6535 u32 parsed_buf_size;
6537 return qed_parse_protection_override_dump(p_hwfn,
6544 /* Parses a FW Asserts dump buffer.
6545 * If result_buf is not NULL, the FW Asserts results are printed to it.
6546 * In any case, the required results buffer size is assigned to
6547 * parsed_results_bytes.
6548 * The parsing status is returned.
6550 static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
6552 u32 num_dumped_dwords,
6554 u32 *parsed_results_bytes)
6556 u32 results_offset = 0, num_section_params, param_num_val, i;
6557 const char *param_name, *param_str_val, *section_name;
6558 bool last_section_found = false;
6560 *parsed_results_bytes = 0;
6562 /* Read global_params section */
6563 dump_buf += qed_read_section_hdr(dump_buf,
6564 §ion_name, &num_section_params);
6565 if (strcmp(section_name, "global_params"))
6566 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6568 /* Print global params */
6569 dump_buf += qed_print_section_params(dump_buf,
6571 results_buf, &results_offset);
6572 while (!last_section_found) {
6573 const char *storm_letter = NULL;
6574 u32 storm_dump_size = 0;
6576 dump_buf += qed_read_section_hdr(dump_buf,
6578 &num_section_params);
6579 if (!strcmp(section_name, "last")) {
6580 last_section_found = true;
6582 } else if (strcmp(section_name, "fw_asserts")) {
6583 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6586 /* Extract params */
6587 for (i = 0; i < num_section_params; i++) {
6588 dump_buf += qed_read_param(dump_buf,
6592 if (!strcmp(param_name, "storm"))
6593 storm_letter = param_str_val;
6594 else if (!strcmp(param_name, "size"))
6595 storm_dump_size = param_num_val;
6597 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6600 if (!storm_letter || !storm_dump_size)
6601 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6604 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6606 "\n%sSTORM_ASSERT: size=%d\n",
6607 storm_letter, storm_dump_size);
6608 for (i = 0; i < storm_dump_size; i++, dump_buf++)
6610 sprintf(qed_get_buf_ptr(results_buf,
6612 "%08x\n", *dump_buf);
6615 /* Add 1 for string NULL termination */
6616 *parsed_results_bytes = results_offset + 1;
6617 return DBG_STATUS_OK;
6620 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
6622 u32 num_dumped_dwords,
6623 u32 *results_buf_size)
6625 return qed_parse_fw_asserts_dump(p_hwfn,
6628 NULL, results_buf_size);
6631 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
6633 u32 num_dumped_dwords,
6636 u32 parsed_buf_size;
6638 return qed_parse_fw_asserts_dump(p_hwfn,
6641 results_buf, &parsed_buf_size);
6644 /* Wrapper for unifying the idle_chk and mcp_trace api */
6645 static enum dbg_status
6646 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
6648 u32 num_dumped_dwords,
6651 u32 num_errors, num_warnnings;
6653 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
6654 results_buf, &num_errors,
6658 /* Feature meta data lookup table */
6661 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
6662 struct qed_ptt *p_ptt, u32 *size);
6663 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
6664 struct qed_ptt *p_ptt, u32 *dump_buf,
6665 u32 buf_size, u32 *dumped_dwords);
6666 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
6667 u32 *dump_buf, u32 num_dumped_dwords,
6669 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
6671 u32 num_dumped_dwords,
6672 u32 *results_buf_size);
6673 } qed_features_lookup[] = {
6675 "grc", qed_dbg_grc_get_dump_buf_size,
6676 qed_dbg_grc_dump, NULL, NULL}, {
6678 qed_dbg_idle_chk_get_dump_buf_size,
6679 qed_dbg_idle_chk_dump,
6680 qed_print_idle_chk_results_wrapper,
6681 qed_get_idle_chk_results_buf_size}, {
6683 qed_dbg_mcp_trace_get_dump_buf_size,
6684 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
6685 qed_get_mcp_trace_results_buf_size}, {
6687 qed_dbg_reg_fifo_get_dump_buf_size,
6688 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
6689 qed_get_reg_fifo_results_buf_size}, {
6691 qed_dbg_igu_fifo_get_dump_buf_size,
6692 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
6693 qed_get_igu_fifo_results_buf_size}, {
6694 "protection_override",
6695 qed_dbg_protection_override_get_dump_buf_size,
6696 qed_dbg_protection_override_dump,
6697 qed_print_protection_override_results,
6698 qed_get_protection_override_results_buf_size}, {
6700 qed_dbg_fw_asserts_get_dump_buf_size,
6701 qed_dbg_fw_asserts_dump,
6702 qed_print_fw_asserts_results,
6703 qed_get_fw_asserts_results_buf_size},};
6705 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
6707 u32 i, precision = 80;
6712 pr_notice("\n%.*s", precision, p_text_buf);
6713 for (i = precision; i < text_size; i += precision)
6714 pr_cont("%.*s", precision, p_text_buf + i);
6718 #define QED_RESULTS_BUF_MIN_SIZE 16
6719 /* Generic function for decoding debug feature info */
6720 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
6721 enum qed_dbg_features feature_idx)
6723 struct qed_dbg_feature *feature =
6724 &p_hwfn->cdev->dbg_params.features[feature_idx];
6725 u32 text_size_bytes, null_char_pos, i;
6729 /* Check if feature supports formatting capability */
6730 if (!qed_features_lookup[feature_idx].results_buf_size)
6731 return DBG_STATUS_OK;
6733 /* Obtain size of formatted output */
6734 rc = qed_features_lookup[feature_idx].
6735 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
6736 feature->dumped_dwords, &text_size_bytes);
6737 if (rc != DBG_STATUS_OK)
6740 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
6741 null_char_pos = text_size_bytes - 1;
6742 text_size_bytes = (text_size_bytes + 3) & ~0x3;
6744 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
6745 DP_NOTICE(p_hwfn->cdev,
6746 "formatted size of feature was too small %d. Aborting\n",
6748 return DBG_STATUS_INVALID_ARGS;
6751 /* Allocate temp text buf */
6752 text_buf = vzalloc(text_size_bytes);
6754 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6756 /* Decode feature opcodes to string on temp buf */
6757 rc = qed_features_lookup[feature_idx].
6758 print_results(p_hwfn, (u32 *)feature->dump_buf,
6759 feature->dumped_dwords, text_buf);
6760 if (rc != DBG_STATUS_OK) {
6765 /* Replace the original null character with a '\n' character.
6766 * The bytes that were added as a result of the dword alignment are also
6767 * padded with '\n' characters.
6769 for (i = null_char_pos; i < text_size_bytes; i++)
6772 /* Dump printable feature to log */
6773 if (p_hwfn->cdev->dbg_params.print_data)
6774 qed_dbg_print_feature(text_buf, text_size_bytes);
6776 /* Free the old dump_buf and point the dump_buf to the newly allocagted
6777 * and formatted text buffer.
6779 vfree(feature->dump_buf);
6780 feature->dump_buf = text_buf;
6781 feature->buf_size = text_size_bytes;
6782 feature->dumped_dwords = text_size_bytes / 4;
6786 /* Generic function for performing the dump of a debug feature. */
6787 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
6788 struct qed_ptt *p_ptt,
6789 enum qed_dbg_features feature_idx)
6791 struct qed_dbg_feature *feature =
6792 &p_hwfn->cdev->dbg_params.features[feature_idx];
6793 u32 buf_size_dwords;
6796 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
6797 qed_features_lookup[feature_idx].name);
6799 /* Dump_buf was already allocated need to free (this can happen if dump
6800 * was called but file was never read).
6801 * We can't use the buffer as is since size may have changed.
6803 if (feature->dump_buf) {
6804 vfree(feature->dump_buf);
6805 feature->dump_buf = NULL;
6808 /* Get buffer size from hsi, allocate accordingly, and perform the
6811 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
6813 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6815 feature->buf_size = buf_size_dwords * sizeof(u32);
6816 feature->dump_buf = vmalloc(feature->buf_size);
6817 if (!feature->dump_buf)
6818 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6820 rc = qed_features_lookup[feature_idx].
6821 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
6822 feature->buf_size / sizeof(u32),
6823 &feature->dumped_dwords);
6825 /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
6826 * In this case the buffer holds valid binary data, but we wont able
6827 * to parse it (since parsing relies on data in NVRAM which is only
6828 * accessible when MFW is responsive). skip the formatting but return
6829 * success so that binary data is provided.
6831 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6832 return DBG_STATUS_OK;
6834 if (rc != DBG_STATUS_OK)
6838 rc = format_feature(p_hwfn, feature_idx);
6842 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6844 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
6847 int qed_dbg_grc_size(struct qed_dev *cdev)
6849 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
6852 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6854 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
6858 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
6860 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
6863 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6865 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
6869 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
6871 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
6874 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6876 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
6880 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
6882 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
6885 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
6886 u32 *num_dumped_bytes)
6888 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
6892 int qed_dbg_protection_override_size(struct qed_dev *cdev)
6894 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
6897 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
6898 u32 *num_dumped_bytes)
6900 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
6904 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
6906 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
6909 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
6910 u32 *num_dumped_bytes)
6912 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
6916 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
6918 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
/* Defines the amount of bytes allocated for recording the length of debugfs
 * feature buffer.
 */
#define REGDUMP_HEADER_SIZE sizeof(u32)
#define REGDUMP_HEADER_FEATURE_SHIFT 24
#define REGDUMP_HEADER_ENGINE_SHIFT 31
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
/* Feature identifiers recorded in each regdump section header */
enum debug_print_features {
	OLD_MODE = 0,
	IDLE_CHK = 1,
	GRC_DUMP = 2,
	MCP_TRACE = 3,
	REG_FIFO = 4,
	PROTECTION_OVERRIDE = 5,
	IGU_FIFO = 6,
	PHY = 7,
	FW_ASSERTS = 8,
};
6940 static u32 qed_calc_regdump_header(enum debug_print_features feature,
6941 int engine, u32 feature_size, u8 omit_engine)
6943 /* Insert the engine, feature and mode inside the header and combine it
6944 * with feature size.
6946 return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
6947 (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
6948 (engine << REGDUMP_HEADER_ENGINE_SHIFT);
6951 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
6953 u8 cur_engine, omit_engine = 0, org_engine;
6954 u32 offset = 0, feature_size;
6957 if (cdev->num_hwfns == 1)
6960 org_engine = qed_get_debug_engine(cdev);
6961 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
6962 /* Collect idle_chks and grcDump for each hw function */
6963 DP_VERBOSE(cdev, QED_MSG_DEBUG,
6964 "obtaining idle_chk and grcdump for current engine\n");
6965 qed_set_debug_engine(cdev, cur_engine);
6967 /* First idle_chk */
6968 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
6969 REGDUMP_HEADER_SIZE, &feature_size);
6971 *(u32 *)((u8 *)buffer + offset) =
6972 qed_calc_regdump_header(IDLE_CHK, cur_engine,
6973 feature_size, omit_engine);
6974 offset += (feature_size + REGDUMP_HEADER_SIZE);
6976 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
6979 /* Second idle_chk */
6980 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
6981 REGDUMP_HEADER_SIZE, &feature_size);
6983 *(u32 *)((u8 *)buffer + offset) =
6984 qed_calc_regdump_header(IDLE_CHK, cur_engine,
6985 feature_size, omit_engine);
6986 offset += (feature_size + REGDUMP_HEADER_SIZE);
6988 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
6992 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
6993 REGDUMP_HEADER_SIZE, &feature_size);
6995 *(u32 *)((u8 *)buffer + offset) =
6996 qed_calc_regdump_header(REG_FIFO, cur_engine,
6997 feature_size, omit_engine);
6998 offset += (feature_size + REGDUMP_HEADER_SIZE);
7000 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7004 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7005 REGDUMP_HEADER_SIZE, &feature_size);
7007 *(u32 *)((u8 *)buffer + offset) =
7008 qed_calc_regdump_header(IGU_FIFO, cur_engine,
7009 feature_size, omit_engine);
7010 offset += (feature_size + REGDUMP_HEADER_SIZE);
7012 DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
7015 /* protection_override dump */
7016 rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7017 REGDUMP_HEADER_SIZE,
7020 *(u32 *)((u8 *)buffer + offset) =
7021 qed_calc_regdump_header(PROTECTION_OVERRIDE,
7023 feature_size, omit_engine);
7024 offset += (feature_size + REGDUMP_HEADER_SIZE);
7027 "qed_dbg_protection_override failed. rc = %d\n",
7031 /* fw_asserts dump */
7032 rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7033 REGDUMP_HEADER_SIZE, &feature_size);
7035 *(u32 *)((u8 *)buffer + offset) =
7036 qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7037 feature_size, omit_engine);
7038 offset += (feature_size + REGDUMP_HEADER_SIZE);
7040 DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
7044 /* GRC dump - must be last because when mcp stuck it will
7045 * clutter idle_chk, reg_fifo, ...
7047 rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7048 REGDUMP_HEADER_SIZE, &feature_size);
7050 *(u32 *)((u8 *)buffer + offset) =
7051 qed_calc_regdump_header(GRC_DUMP, cur_engine,
7052 feature_size, omit_engine);
7053 offset += (feature_size + REGDUMP_HEADER_SIZE);
7055 DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
7060 rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7061 REGDUMP_HEADER_SIZE, &feature_size);
7063 *(u32 *)((u8 *)buffer + offset) =
7064 qed_calc_regdump_header(MCP_TRACE, cur_engine,
7065 feature_size, omit_engine);
7066 offset += (feature_size + REGDUMP_HEADER_SIZE);
7068 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
7071 qed_set_debug_engine(cdev, org_engine);
7076 int qed_dbg_all_data_size(struct qed_dev *cdev)
7078 u8 cur_engine, org_engine;
7081 org_engine = qed_get_debug_engine(cdev);
7082 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7083 /* Engine specific */
7084 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7085 "calculating idle_chk and grcdump register length for current engine\n");
7086 qed_set_debug_engine(cdev, cur_engine);
7087 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7088 REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7089 REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
7090 REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
7091 REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
7092 REGDUMP_HEADER_SIZE +
7093 qed_dbg_protection_override_size(cdev) +
7094 REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
7098 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
7099 qed_set_debug_engine(cdev, org_engine);
7104 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
7105 enum qed_dbg_features feature, u32 *num_dumped_bytes)
7107 struct qed_hwfn *p_hwfn =
7108 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
7109 struct qed_dbg_feature *qed_feature =
7110 &cdev->dbg_params.features[feature];
7111 enum dbg_status dbg_rc;
7112 struct qed_ptt *p_ptt;
7116 p_ptt = qed_ptt_acquire(p_hwfn);
7121 dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
7122 if (dbg_rc != DBG_STATUS_OK) {
7123 DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
7124 qed_dbg_get_status_str(dbg_rc));
7125 *num_dumped_bytes = 0;
7130 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7131 "copying debugfs feature to external buffer\n");
7132 memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
7133 *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
7137 qed_ptt_release(p_hwfn, p_ptt);
7141 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
7143 struct qed_hwfn *p_hwfn =
7144 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
7145 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
7146 struct qed_dbg_feature *qed_feature =
7147 &cdev->dbg_params.features[feature];
7148 u32 buf_size_dwords;
7154 rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
7156 if (rc != DBG_STATUS_OK)
7157 buf_size_dwords = 0;
7159 qed_ptt_release(p_hwfn, p_ptt);
7160 qed_feature->buf_size = buf_size_dwords * sizeof(u32);
7161 return qed_feature->buf_size;
7164 u8 qed_get_debug_engine(struct qed_dev *cdev)
7166 return cdev->dbg_params.engine_for_debug;
7169 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
7171 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
7173 cdev->dbg_params.engine_for_debug = engine_number;
7176 void qed_dbg_pf_init(struct qed_dev *cdev)
7178 const u8 *dbg_values;
7180 /* Debug values are after init values.
7181 * The offset is the first dword of the file.
7183 dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
7184 qed_dbg_set_bin_ptr((u8 *)dbg_values);
7185 qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
7188 void qed_dbg_pf_exit(struct qed_dev *cdev)
7190 struct qed_dbg_feature *feature = NULL;
7191 enum qed_dbg_features feature_idx;
7193 /* Debug features' buffers may be allocated if debug feature was used
7194 * but dump wasn't called.
7196 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
7197 feature = &cdev->dbg_params.features[feature_idx];
7198 if (feature->dump_buf) {
7199 vfree(feature->dump_buf);
7200 feature->dump_buf = NULL;