/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.53-1"
#define DRV_MODULE_RELDATE      "2010/04/18"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

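/* Note: the two helpers above use the GRC window in PCI config space
 * (PCICFG_GRC_ADDRESS selects the target, PCICFG_GRC_DATA carries the
 * value), so chip registers can be reached without the memory-mapped
 * REG_RD()/REG_WR() path.  The window is parked back at
 * PCICFG_VENDOR_ID_OFFSET so it is not left pointing at an arbitrary
 * register.
 */
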
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

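/* Note: there are 16 DMAE channels (GO_C0..GO_C15); each has its own
 * command slot in DMAE_REG_CMD_MEM.  The command is copied in one
 * 32-bit word at a time, and writing 1 to the per-channel GO register
 * starts the transfer.
 */
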
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

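/* Note on the completion handshake used by bnx2x_write_dmae() and
 * bnx2x_read_dmae(): the completion word in the slowpath area is
 * cleared under dmae_mutex, and the DMAE block writes DMAE_COMP_VAL
 * there when the transfer is done.  With cnt = 200 polls of 5us each,
 * roughly 1ms is allowed on real silicon (100ms steps on
 * emulation/FPGA) before a timeout is reported.
 */
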
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

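/* Note: in bnx2x_write_dmae_phys_len() the length is counted in 32-bit
 * words while offset advances in bytes (hence the "* 4"), so the DMA
 * source address and the GRC destination address move forward
 * consistently between chunks of at most DMAE_LEN32_WR_MAX words.
 */
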
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

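/* Note: each storm processor (X/T/C/U) keeps a fixed-size list of
 * 16-byte assert entries; an entry whose first word still reads
 * COMMON_ASM_INVALID_ASSERT_OPCODE terminates the scan, so the value
 * returned is the number of valid asserts found across all four
 * storms.
 */
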
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

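/* Note: bnx2x_fw_dump() treats the MCP scratchpad log as effectively
 * circular - it prints from the saved mark up to shmem_base first and
 * then wraps from the start of the area back up to the mark.  Each
 * pass reads eight big-endian words and emits them as a NUL-terminated
 * string via pr_cont().
 */
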
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

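/* Note: the three branches in bnx2x_int_enable() program the HC for the
 * active mode - MSI-X (MSI/MSI-X enable, no single ISR, no INTA line),
 * MSI (single ISR + MSI/MSI-X enable) or INTx (all enables set).  In
 * the INTx case the value is written twice: once with the MSI/MSI-X
 * bit set and once with it cleared, leaving only the legacy line
 * enabled.
 */
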
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

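/* Note the teardown order in bnx2x_int_disable_sync(): intr_sem is
 * bumped first so that a late ISR bails out early, the HC is then
 * optionally masked, synchronize_irq() waits for handlers already in
 * flight on every vector (in MSI-X mode vector 0 serves the default
 * status block, with one extra vector reserved when BCM_CNIC is set),
 * and finally the slowpath work is cancelled and the workqueue drained.
 */
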
/* fast path */

/*
 * General service functions
 */

/* Return true if the lock on the given resource was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

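/* Note: in bnx2x_free_tx_pkt() the BD count (nbd) is taken from the
 * start BD and new_cons is computed from it up front; the unmap loop
 * then only visits BDs that actually carry DMA mappings, skipping the
 * parse BD and, for TSO, the split-header BD.
 */
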
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

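/* Note: SUB_S16() gives the wrap-safe 16-bit distance between producer
 * and consumer, and the NUM_TX_RINGS "next page" BDs are counted as
 * used because they can never hold packet data, so bnx2x_tx_avail()
 * returns a conservative number of free BDs.
 */
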
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking tx_lock() is needed to prevent reenabling the queue
                 * while it's empty. This could have happened if rx_action() gets
                 * suspended in bnx2x_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                                   dma_unmap_addr(cons_rx_buf, mapping),
                                   RX_COPY_THRESH, DMA_FROM_DEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

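/* Note: sge_mask is a bitmap with one bit per SGE entry.  Bits are
 * cleared as the FW consumes pages; once a whole 64-bit mask element
 * reads zero the corresponding block of SGEs can be recycled, so the
 * producer in bnx2x_update_sge_prod() advances over such elements
 * (refilling each mask element to all ones) rather than one entry at
 * a time.
 */
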
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

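/* Note: a TPA "bin" is a per-queue parking slot for the buffer that is
 * accumulating an aggregation.  bnx2x_tpa_start() swaps buffers: the
 * spare skb kept in the pool takes the producer slot on the BD ring,
 * while the buffer that just started aggregating moves into the pool
 * (still mapped) until bnx2x_tpa_stop() hands it to the stack.
 */
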
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

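/* Note: the gso_size set in bnx2x_fill_frag_skb() is what allows an
 * aggregated frame to be re-segmented later (e.g. when forwarded); it
 * is derived from frag_size and len_on_bd and capped at one SGE page.
 */
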
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425                            u16 cqe_idx)
1426 {
1427         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428         struct sk_buff *skb = rx_buf->skb;
1429         /* alloc new skb */
1430         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432         /* Unmap skb in the pool anyway, as we are going to change
1433            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434            fails. */
1435         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436                          bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438         if (likely(new_skb)) {
1439                 /* fix ip xsum and give it to the stack */
1440                 /* (no need to map the new skb) */
1441 #ifdef BCM_VLAN
1442                 int is_vlan_cqe =
1443                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444                          PARSING_FLAGS_VLAN);
1445                 int is_not_hwaccel_vlan_cqe =
1446                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447 #endif
1448
1449                 prefetch(skb);
1450                 prefetch(((char *)(skb)) + 128);
1451
1452 #ifdef BNX2X_STOP_ON_ERROR
1453                 if (pad + len > bp->rx_buf_size) {
1454                         BNX2X_ERR("skb_put is about to fail...  "
1455                                   "pad %d  len %d  rx_buf_size %d\n",
1456                                   pad, len, bp->rx_buf_size);
1457                         bnx2x_panic();
1458                         return;
1459                 }
1460 #endif
1461
1462                 skb_reserve(skb, pad);
1463                 skb_put(skb, len);
1464
1465                 skb->protocol = eth_type_trans(skb, bp->dev);
1466                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468                 {
1469                         struct iphdr *iph;
1470
1471                         iph = (struct iphdr *)skb->data;
1472 #ifdef BCM_VLAN
1473                         /* If there is no Rx VLAN offloading -
1474                            take the VLAN tag into account */
1475                         if (unlikely(is_not_hwaccel_vlan_cqe))
1476                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477 #endif
1478                         iph->check = 0;
1479                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480                 }
1481
1482                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483                                          &cqe->fast_path_cqe, cqe_idx)) {
1484 #ifdef BCM_VLAN
1485                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486                             (!is_not_hwaccel_vlan_cqe))
1487                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488                                                  le16_to_cpu(cqe->fast_path_cqe.
1489                                                              vlan_tag), skb);
1490                         else
1491 #endif
1492                                 napi_gro_receive(&fp->napi, skb);
1493                 } else {
1494                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495                            " - dropping packet!\n");
1496                         dev_kfree_skb(skb);
1497                 }
1498
1499
1500                 /* put new skb in bin */
1501                 fp->tpa_pool[queue].skb = new_skb;
1502
1503         } else {
1504                 /* else drop the packet and keep the buffer in the bin */
1505                 DP(NETIF_MSG_RX_STATUS,
1506                    "Failed to allocate new skb - dropping packet!\n");
1507                 fp->eth_q_stats.rx_skb_alloc_failed++;
1508         }
1509
1510         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511 }
1512
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514                                         struct bnx2x_fastpath *fp,
1515                                         u16 bd_prod, u16 rx_comp_prod,
1516                                         u16 rx_sge_prod)
1517 {
1518         struct ustorm_eth_rx_producers rx_prods = {0};
1519         int i;
1520
1521         /* Update producers */
1522         rx_prods.bd_prod = bd_prod;
1523         rx_prods.cqe_prod = rx_comp_prod;
1524         rx_prods.sge_prod = rx_sge_prod;
1525
1526         /*
1527          * Make sure that the BD and SGE data is updated before updating the
1528          * producers since FW might read the BD/SGE right after the producer
1529          * is updated.
1530          * This is only applicable to weak-ordered memory model archs such
1531          * as IA-64. The following barrier is also mandatory since the FW
1532          * assumes BDs must have buffers.
1533          */
1534         wmb();
1535
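     /* Copy the new producer values into the ustorm internal memory
      * word by word; the FW reads them from there.
      */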
1536         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537                 REG_WR(bp, BAR_USTRORM_INTMEM +
1538                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539                        ((u32 *)&rx_prods)[i]);
1540
1541         mmiowb(); /* keep prod updates ordered */
1542
1543         DP(NETIF_MSG_RX_STATUS,
1544            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1545            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546 }
1547
1548 /* Set Toeplitz hash value in the skb using the value from the
1549  * CQE (calculated by HW).
1550  */
1551 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
1552                                         struct sk_buff *skb)
1553 {
1554         /* Set Toeplitz hash from CQE */
1555         if ((bp->dev->features & NETIF_F_RXHASH) &&
1556             (cqe->fast_path_cqe.status_flags &
1557              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1558                 skb->rxhash =
1559                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1560 }
1561
1562 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1563 {
1564         struct bnx2x *bp = fp->bp;
1565         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1566         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1567         int rx_pkt = 0;
1568
1569 #ifdef BNX2X_STOP_ON_ERROR
1570         if (unlikely(bp->panic))
1571                 return 0;
1572 #endif
1573
1574         /* The CQ "next element" is the same size as a regular element,
1575            which is why treating it uniformly here is OK */
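     /* The last descriptor of each RCQ page is a "next page" pointer
      * rather than a real CQE, so the consumer index steps over it.
      */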
1576         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1577         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1578                 hw_comp_cons++;
1579
1580         bd_cons = fp->rx_bd_cons;
1581         bd_prod = fp->rx_bd_prod;
1582         bd_prod_fw = bd_prod;
1583         sw_comp_cons = fp->rx_comp_cons;
1584         sw_comp_prod = fp->rx_comp_prod;
1585
1586         /* Memory barrier necessary as speculative reads of the rx
1587          * buffer can be ahead of the index in the status block
1588          */
1589         rmb();
1590
1591         DP(NETIF_MSG_RX_STATUS,
1592            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1593            fp->index, hw_comp_cons, sw_comp_cons);
1594
1595         while (sw_comp_cons != hw_comp_cons) {
1596                 struct sw_rx_bd *rx_buf = NULL;
1597                 struct sk_buff *skb;
1598                 union eth_rx_cqe *cqe;
1599                 u8 cqe_fp_flags;
1600                 u16 len, pad;
1601
1602                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1603                 bd_prod = RX_BD(bd_prod);
1604                 bd_cons = RX_BD(bd_cons);
1605
1606                 /* Prefetch the page containing the BD descriptor
1607                    at the producer's index. It will be needed when a new
1608                    skb is allocated */
1609                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1610                                              (&fp->rx_desc_ring[bd_prod])) -
1611                                   PAGE_SIZE + 1));
1612
1613                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1614                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1615
1616                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1617                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1618                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1619                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1620                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1621                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1622
1623                 /* is this a slowpath msg? */
1624                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1625                         bnx2x_sp_event(fp, cqe);
1626                         goto next_cqe;
1627
1628                 /* this is an rx packet */
1629                 } else {
1630                         rx_buf = &fp->rx_buf_ring[bd_cons];
1631                         skb = rx_buf->skb;
1632                         prefetch(skb);
1633                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1634                         pad = cqe->fast_path_cqe.placement_offset;
1635
1636                         /* If the CQE is marked as both TPA_START and
1637                            TPA_END, it is a non-TPA CQE */
1638                         if ((!fp->disable_tpa) &&
1639                             (TPA_TYPE(cqe_fp_flags) !=
1640                                         (TPA_TYPE_START | TPA_TYPE_END))) {
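     /* TPA (transparent packet aggregation) is
      * the HW LRO of this device: TPA_START
      * means the FW opened an aggregation on
      * this queue, TPA_END means it completed
      * one and the SGEs now hold the aggregated
      * payload.
      */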
1641                                 u16 queue = cqe->fast_path_cqe.queue_index;
1642
1643                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1644                                         DP(NETIF_MSG_RX_STATUS,
1645                                            "calling tpa_start on queue %d\n",
1646                                            queue);
1647
1648                                         bnx2x_tpa_start(fp, queue, skb,
1649                                                         bd_cons, bd_prod);
1650
1651                                         /* Set Toeplitz hash for an LRO skb */
1652                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
1653
1654                                         goto next_rx;
1655                                 }
1656
1657                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1658                                         DP(NETIF_MSG_RX_STATUS,
1659                                            "calling tpa_stop on queue %d\n",
1660                                            queue);
1661
1662                                         if (!BNX2X_RX_SUM_FIX(cqe))
1663                                                 BNX2X_ERR("STOP on non-TCP "
1664                                                           "data\n");
1665
1666                                         /* This is the size of the linear
1667                                            data on this skb */
1668                                         len = le16_to_cpu(cqe->fast_path_cqe.
1669                                                                 len_on_bd);
1670                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1671                                                     len, cqe, comp_ring_cons);
1672 #ifdef BNX2X_STOP_ON_ERROR
1673                                         if (bp->panic)
1674                                                 return 0;
1675 #endif
1676
1677                                         bnx2x_update_sge_prod(fp,
1678                                                         &cqe->fast_path_cqe);
1679                                         goto next_cqe;
1680                                 }
1681                         }
1682
1683                         dma_sync_single_for_device(&bp->pdev->dev,
1684                                         dma_unmap_addr(rx_buf, mapping),
1685                                                    pad + RX_COPY_THRESH,
1686                                                    DMA_FROM_DEVICE);
1687                         prefetch(((char *)(skb)) + 128);
1688
1689                         /* is this an error packet? */
1690                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1691                                 DP(NETIF_MSG_RX_ERR,
1692                                    "ERROR  flags %x  rx packet %u\n",
1693                                    cqe_fp_flags, sw_comp_cons);
1694                                 fp->eth_q_stats.rx_err_discard_pkt++;
1695                                 goto reuse_rx;
1696                         }
1697
1698                         /* Since we don't have a jumbo ring,
1699                          * copy small packets if the mtu > 1500
1700                          */
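     /* This is the classic "copy break": for short frames
      * it is cheaper to copy the data into a small fresh skb
      * and recycle the original rx buffer than to allocate a
      * full-sized replacement buffer.
      */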
1701                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1702                             (len <= RX_COPY_THRESH)) {
1703                                 struct sk_buff *new_skb;
1704
1705                                 new_skb = netdev_alloc_skb(bp->dev,
1706                                                            len + pad);
1707                                 if (new_skb == NULL) {
1708                                         DP(NETIF_MSG_RX_ERR,
1709                                            "ERROR  packet dropped "
1710                                            "because of alloc failure\n");
1711                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1712                                         goto reuse_rx;
1713                                 }
1714
1715                                 /* aligned copy */
1716                                 skb_copy_from_linear_data_offset(skb, pad,
1717                                                     new_skb->data + pad, len);
1718                                 skb_reserve(new_skb, pad);
1719                                 skb_put(new_skb, len);
1720
1721                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1722
1723                                 skb = new_skb;
1724
1725                         } else
1726                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1727                                 dma_unmap_single(&bp->pdev->dev,
1728                                         dma_unmap_addr(rx_buf, mapping),
1729                                                  bp->rx_buf_size,
1730                                                  DMA_FROM_DEVICE);
1731                                 skb_reserve(skb, pad);
1732                                 skb_put(skb, len);
1733
1734                         } else {
1735                                 DP(NETIF_MSG_RX_ERR,
1736                                    "ERROR  packet dropped because "
1737                                    "of alloc failure\n");
1738                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1739 reuse_rx:
1740                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1741                                 goto next_rx;
1742                         }
1743
1744                         skb->protocol = eth_type_trans(skb, bp->dev);
1745
1746                         /* Set Toeplitz hash for a non-LRO skb */
1747                         bnx2x_set_skb_rxhash(bp, cqe, skb);
1748
1749                         skb->ip_summed = CHECKSUM_NONE;
1750                         if (bp->rx_csum) {
1751                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1752                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1753                                 else
1754                                         fp->eth_q_stats.hw_csum_err++;
1755                         }
1756                 }
1757
1758                 skb_record_rx_queue(skb, fp->index);
1759
1760 #ifdef BCM_VLAN
1761                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1762                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1763                      PARSING_FLAGS_VLAN))
1764                         vlan_gro_receive(&fp->napi, bp->vlgrp,
1765                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1766                 else
1767 #endif
1768                         napi_gro_receive(&fp->napi, skb);
1769
1770
1771 next_rx:
1772                 rx_buf->skb = NULL;
1773
1774                 bd_cons = NEXT_RX_IDX(bd_cons);
1775                 bd_prod = NEXT_RX_IDX(bd_prod);
1776                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1777                 rx_pkt++;
1778 next_cqe:
1779                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1780                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1781
1782                 if (rx_pkt == budget)
1783                         break;
1784         } /* while */
1785
1786         fp->rx_bd_cons = bd_cons;
1787         fp->rx_bd_prod = bd_prod_fw;
1788         fp->rx_comp_cons = sw_comp_cons;
1789         fp->rx_comp_prod = sw_comp_prod;
1790
1791         /* Update producers */
1792         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1793                              fp->rx_sge_prod);
1794
1795         fp->rx_pkt += rx_pkt;
1796         fp->rx_calls++;
1797
1798         return rx_pkt;
1799 }
1800
1801 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1802 {
1803         struct bnx2x_fastpath *fp = fp_cookie;
1804         struct bnx2x *bp = fp->bp;
1805
1806         /* Return here if interrupt is disabled */
1807         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1808                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1809                 return IRQ_HANDLED;
1810         }
1811
1812         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1813            fp->index, fp->sb_id);
1814         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1815
1816 #ifdef BNX2X_STOP_ON_ERROR
1817         if (unlikely(bp->panic))
1818                 return IRQ_HANDLED;
1819 #endif
1820
1821         /* Handle Rx and Tx according to MSI-X vector */
1822         prefetch(fp->rx_cons_sb);
1823         prefetch(fp->tx_cons_sb);
1824         prefetch(&fp->status_blk->u_status_block.status_block_index);
1825         prefetch(&fp->status_blk->c_status_block.status_block_index);
1826         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1827
1828         return IRQ_HANDLED;
1829 }
1830
1831 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1832 {
1833         struct bnx2x *bp = netdev_priv(dev_instance);
1834         u16 status = bnx2x_ack_int(bp);
1835         u16 mask;
1836         int i;
1837
1838         /* Return here if interrupt is shared and it's not for us */
1839         if (unlikely(status == 0)) {
1840                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1841                 return IRQ_NONE;
1842         }
1843         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1844
1845         /* Return here if interrupt is disabled */
1846         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1847                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1848                 return IRQ_HANDLED;
1849         }
1850
1851 #ifdef BNX2X_STOP_ON_ERROR
1852         if (unlikely(bp->panic))
1853                 return IRQ_HANDLED;
1854 #endif
1855
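     /* In the ack'ed status word, bit 0 belongs to the default (slow
      * path) status block and bit (sb_id + 1) to fastpath status
      * block sb_id - hence the 0x2 << fp->sb_id masks below.
      */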
1856         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1857                 struct bnx2x_fastpath *fp = &bp->fp[i];
1858
1859                 mask = 0x2 << fp->sb_id;
1860                 if (status & mask) {
1861                         /* Handle Rx and Tx according to SB id */
1862                         prefetch(fp->rx_cons_sb);
1863                         prefetch(&fp->status_blk->u_status_block.
1864                                                 status_block_index);
1865                         prefetch(fp->tx_cons_sb);
1866                         prefetch(&fp->status_blk->c_status_block.
1867                                                 status_block_index);
1868                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1869                         status &= ~mask;
1870                 }
1871         }
1872
1873 #ifdef BCM_CNIC
1874         mask = 0x2 << CNIC_SB_ID(bp);
1875         if (status & (mask | 0x1)) {
1876                 struct cnic_ops *c_ops = NULL;
1877
1878                 rcu_read_lock();
1879                 c_ops = rcu_dereference(bp->cnic_ops);
1880                 if (c_ops)
1881                         c_ops->cnic_handler(bp->cnic_data, NULL);
1882                 rcu_read_unlock();
1883
1884                 status &= ~mask;
1885         }
1886 #endif
1887
1888         if (unlikely(status & 0x1)) {
1889                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1890
1891                 status &= ~0x1;
1892                 if (!status)
1893                         return IRQ_HANDLED;
1894         }
1895
1896         if (unlikely(status))
1897                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1898                    status);
1899
1900         return IRQ_HANDLED;
1901 }
1902
1903 /* end of fast path */
1904
1905 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1906
1907 /* Link */
1908
1909 /*
1910  * General service functions
1911  */
1912
1913 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1914 {
1915         u32 lock_status;
1916         u32 resource_bit = (1 << resource);
1917         int func = BP_FUNC(bp);
1918         u32 hw_lock_control_reg;
1919         int cnt;
1920
1921         /* Validating that the resource is within range */
1922         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1923                 DP(NETIF_MSG_HW,
1924                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1925                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1926                 return -EINVAL;
1927         }
1928
1929         if (func <= 5) {
1930                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1931         } else {
1932                 hw_lock_control_reg =
1933                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1934         }
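     /* Lock protocol, as used here: reading hw_lock_control_reg shows
      * the resource bits currently held; writing a bit to (reg + 4)
      * requests it, and a read-back with the bit set means the lock
      * was granted.  bnx2x_release_hw_lock() releases by writing the
      * bit back to the base register.
      */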
1935
1936         /* Validating that the resource is not already taken */
1937         lock_status = REG_RD(bp, hw_lock_control_reg);
1938         if (lock_status & resource_bit) {
1939                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1940                    lock_status, resource_bit);
1941                 return -EEXIST;
1942         }
1943
1944         /* Try for 5 seconds, polling every 5ms */
1945         for (cnt = 0; cnt < 1000; cnt++) {
1946                 /* Try to acquire the lock */
1947                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1948                 lock_status = REG_RD(bp, hw_lock_control_reg);
1949                 if (lock_status & resource_bit)
1950                         return 0;
1951
1952                 msleep(5);
1953         }
1954         DP(NETIF_MSG_HW, "Timeout\n");
1955         return -EAGAIN;
1956 }
1957
1958 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1959 {
1960         u32 lock_status;
1961         u32 resource_bit = (1 << resource);
1962         int func = BP_FUNC(bp);
1963         u32 hw_lock_control_reg;
1964
1965         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1966
1967         /* Validating that the resource is within range */
1968         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1969                 DP(NETIF_MSG_HW,
1970                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1971                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1972                 return -EINVAL;
1973         }
1974
1975         if (func <= 5) {
1976                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1977         } else {
1978                 hw_lock_control_reg =
1979                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1980         }
1981
1982         /* Validating that the resource is currently taken */
1983         lock_status = REG_RD(bp, hw_lock_control_reg);
1984         if (!(lock_status & resource_bit)) {
1985                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1986                    lock_status, resource_bit);
1987                 return -EFAULT;
1988         }
1989
1990         REG_WR(bp, hw_lock_control_reg, resource_bit);
1991         return 0;
1992 }
1993
1994 /* HW Lock for shared dual port PHYs */
1995 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1996 {
1997         mutex_lock(&bp->port.phy_mutex);
1998
1999         if (bp->port.need_hw_lock)
2000                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
2001 }
2002
2003 static void bnx2x_release_phy_lock(struct bnx2x *bp)
2004 {
2005         if (bp->port.need_hw_lock)
2006                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
2007
2008         mutex_unlock(&bp->port.phy_mutex);
2009 }
2010
2011 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2012 {
2013         /* The GPIO should be swapped if swap register is set and active */
2014         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2015                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2016         int gpio_shift = gpio_num +
2017                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2018         u32 gpio_mask = (1 << gpio_shift);
2019         u32 gpio_reg;
2020         int value;
2021
2022         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2023                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2024                 return -EINVAL;
2025         }
2026
2027         /* read GPIO value */
2028         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2029
2030         /* get the requested pin value */
2031         if ((gpio_reg & gpio_mask) == gpio_mask)
2032                 value = 1;
2033         else
2034                 value = 0;
2035
2036         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2037
2038         return value;
2039 }
2040
2041 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2042 {
2043         /* The GPIO should be swapped if swap register is set and active */
2044         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046         int gpio_shift = gpio_num +
2047                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048         u32 gpio_mask = (1 << gpio_shift);
2049         u32 gpio_reg;
2050
2051         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2053                 return -EINVAL;
2054         }
2055
2056         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057         /* read GPIO and mask except the float bits */
2058         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2059
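     /* MISC_REG_GPIO packs per-pin fields: writing 1 into the CLR
      * field drives the pin low, into the SET field drives it high,
      * and into the FLOAT field turns the pin into an input (hi-Z).
      * Keeping only the FLOAT bits above avoids rewriting stale
      * SET/CLR requests.
      */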
2060         switch (mode) {
2061         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2062                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2063                    gpio_num, gpio_shift);
2064                 /* clear FLOAT and set CLR */
2065                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2066                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2067                 break;
2068
2069         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2070                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2071                    gpio_num, gpio_shift);
2072                 /* clear FLOAT and set SET */
2073                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2074                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2075                 break;
2076
2077         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2078                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2079                    gpio_num, gpio_shift);
2080                 /* set FLOAT */
2081                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2082                 break;
2083
2084         default:
2085                 break;
2086         }
2087
2088         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2089         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2090
2091         return 0;
2092 }
2093
2094 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2095 {
2096         /* The GPIO should be swapped if swap register is set and active */
2097         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2098                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2099         int gpio_shift = gpio_num +
2100                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2101         u32 gpio_mask = (1 << gpio_shift);
2102         u32 gpio_reg;
2103
2104         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2105                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2106                 return -EINVAL;
2107         }
2108
2109         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2110         /* read GPIO int */
2111         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2112
2113         switch (mode) {
2114         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2115                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2116                                    "output low\n", gpio_num, gpio_shift);
2117                 /* clear SET and set CLR */
2118                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2119                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2120                 break;
2121
2122         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2123                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2124                                    "output high\n", gpio_num, gpio_shift);
2125                 /* clear CLR and set SET */
2126                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2127                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2128                 break;
2129
2130         default:
2131                 break;
2132         }
2133
2134         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2135         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2136
2137         return 0;
2138 }
2139
2140 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2141 {
2142         u32 spio_mask = (1 << spio_num);
2143         u32 spio_reg;
2144
2145         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2146             (spio_num > MISC_REGISTERS_SPIO_7)) {
2147                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2148                 return -EINVAL;
2149         }
2150
2151         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2152         /* read SPIO and mask except the float bits */
2153         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2154
2155         switch (mode) {
2156         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2157                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2158                 /* clear FLOAT and set CLR */
2159                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2161                 break;
2162
2163         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2164                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2165                 /* clear FLOAT and set SET */
2166                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2167                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2168                 break;
2169
2170         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2171                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2172                 /* set FLOAT */
2173                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2174                 break;
2175
2176         default:
2177                 break;
2178         }
2179
2180         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2181         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2182
2183         return 0;
2184 }
2185
2186 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2187 {
2188         switch (bp->link_vars.ieee_fc &
2189                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2190         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2191                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192                                           ADVERTISED_Pause);
2193                 break;
2194
2195         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2196                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2197                                          ADVERTISED_Pause);
2198                 break;
2199
2200         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2201                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2202                 break;
2203
2204         default:
2205                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2206                                           ADVERTISED_Pause);
2207                 break;
2208         }
2209 }
2210
2211 static void bnx2x_link_report(struct bnx2x *bp)
2212 {
2213         if (bp->flags & MF_FUNC_DIS) {
2214                 netif_carrier_off(bp->dev);
2215                 netdev_err(bp->dev, "NIC Link is Down\n");
2216                 return;
2217         }
2218
2219         if (bp->link_vars.link_up) {
2220                 u16 line_speed;
2221
2222                 if (bp->state == BNX2X_STATE_OPEN)
2223                         netif_carrier_on(bp->dev);
2224                 netdev_info(bp->dev, "NIC Link is Up, ");
2225
2226                 line_speed = bp->link_vars.line_speed;
2227                 if (IS_E1HMF(bp)) {
2228                         u16 vn_max_rate;
2229
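     /* the MF max-bandwidth field is in units of
      * 100 Mbit/s, hence the multiplication by 100 */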
2230                         vn_max_rate =
2231                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2232                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2233                         if (vn_max_rate < line_speed)
2234                                 line_speed = vn_max_rate;
2235                 }
2236                 pr_cont("%d Mbps ", line_speed);
2237
2238                 if (bp->link_vars.duplex == DUPLEX_FULL)
2239                         pr_cont("full duplex");
2240                 else
2241                         pr_cont("half duplex");
2242
2243                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2244                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2245                                 pr_cont(", receive ");
2246                                 if (bp->link_vars.flow_ctrl &
2247                                     BNX2X_FLOW_CTRL_TX)
2248                                         pr_cont("& transmit ");
2249                         } else {
2250                                 pr_cont(", transmit ");
2251                         }
2252                         pr_cont("flow control ON");
2253                 }
2254                 pr_cont("\n");
2255
2256         } else { /* link_down */
2257                 netif_carrier_off(bp->dev);
2258                 netdev_err(bp->dev, "NIC Link is Down\n");
2259         }
2260 }
2261
2262 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2263 {
2264         if (!BP_NOMCP(bp)) {
2265                 u8 rc;
2266
2267                 /* Initialize link parameters structure variables */
2268                 /* It is recommended to turn off RX FC for jumbo frames
2269                    for better performance */
2270                 if (bp->dev->mtu > 5000)
2271                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2272                 else
2273                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2274
2275                 bnx2x_acquire_phy_lock(bp);
2276
2277                 if (load_mode == LOAD_DIAG)
2278                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2279
2280                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2281
2282                 bnx2x_release_phy_lock(bp);
2283
2284                 bnx2x_calc_fc_adv(bp);
2285
2286                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2287                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2288                         bnx2x_link_report(bp);
2289                 }
2290
2291                 return rc;
2292         }
2293         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2294         return -EINVAL;
2295 }
2296
2297 static void bnx2x_link_set(struct bnx2x *bp)
2298 {
2299         if (!BP_NOMCP(bp)) {
2300                 bnx2x_acquire_phy_lock(bp);
2301                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2302                 bnx2x_release_phy_lock(bp);
2303
2304                 bnx2x_calc_fc_adv(bp);
2305         } else
2306                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2307 }
2308
2309 static void bnx2x__link_reset(struct bnx2x *bp)
2310 {
2311         if (!BP_NOMCP(bp)) {
2312                 bnx2x_acquire_phy_lock(bp);
2313                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2314                 bnx2x_release_phy_lock(bp);
2315         } else
2316                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2317 }
2318
2319 static u8 bnx2x_link_test(struct bnx2x *bp)
2320 {
2321         u8 rc = 0;
2322
2323         if (!BP_NOMCP(bp)) {
2324                 bnx2x_acquire_phy_lock(bp);
2325                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2326                 bnx2x_release_phy_lock(bp);
2327         } else
2328                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2329
2330         return rc;
2331 }
2332
2333 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2334 {
2335         u32 r_param = bp->link_vars.line_speed / 8;
2336         u32 fair_periodic_timeout_usec;
2337         u32 t_fair;
2338
2339         memset(&(bp->cmng.rs_vars), 0,
2340                sizeof(struct rate_shaping_vars_per_port));
2341         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2342
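     /* with line_speed in Mbit/s, r_param = line_speed/8 is the port
      * rate in bytes per microsecond (e.g. 10000 Mbit/s -> 1250
      * bytes/usec); the thresholds below are computed in these units
      */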
2343         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2344         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2345
2346         /* this is the threshold below which no timer arming will occur;
2347            the 1.25 coefficient makes the threshold a little bigger than
2348            the real time, to compensate for timer inaccuracy */
2349         bp->cmng.rs_vars.rs_threshold =
2350                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2351
2352         /* resolution of fairness timer */
2353         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2354         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2355         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2356
2357         /* this is the threshold below which we won't arm the timer anymore */
2358         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2359
2360         /* we multiply by 1e3/8 to get bytes/msec.
2361            We don't want the credits to exceed
2362            t_fair*FAIR_MEM (the algorithm's resolution) */
2363         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2364         /* since each tick is 4 usec */
2365         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2366 }
2367
2368 /* Calculates the sum of vn_min_rates.
2369    It's needed for further normalizing of the min_rates.
2370    Returns:
2371      sum of vn_min_rates.
2372        or
2373      0 - if all the min_rates are 0.
2374      In the latter case the fairness algorithm should be deactivated.
2375      If not all min_rates are zero then those that are zeroes will be set to 1.
2376  */
2377 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2378 {
2379         int all_zero = 1;
2380         int port = BP_PORT(bp);
2381         int vn;
2382
2383         bp->vn_weight_sum = 0;
2384         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2385                 int func = 2*vn + port;
2386                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2387                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2388                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2389
2390                 /* Skip hidden vns */
2391                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2392                         continue;
2393
2394                 /* If min rate is zero - set it to 1 */
2395                 if (!vn_min_rate)
2396                         vn_min_rate = DEF_MIN_RATE;
2397                 else
2398                         all_zero = 0;
2399
2400                 bp->vn_weight_sum += vn_min_rate;
2401         }
2402
2403         /* ... only if all min rates are zeros - disable fairness */
2404         if (all_zero) {
2405                 bp->cmng.flags.cmng_enables &=
2406                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2407                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2408                    "  fairness will be disabled\n");
2409         } else
2410                 bp->cmng.flags.cmng_enables |=
2411                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2412 }
2413
2414 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2415 {
2416         struct rate_shaping_vars_per_vn m_rs_vn;
2417         struct fairness_vars_per_vn m_fair_vn;
2418         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2419         u16 vn_min_rate, vn_max_rate;
2420         int i;
2421
2422         /* If function is hidden - set min and max to zeroes */
2423         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2424                 vn_min_rate = 0;
2425                 vn_max_rate = 0;
2426
2427         } else {
2428                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2429                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2430                 /* If min rate is zero - set it to 1 */
2431                 if (!vn_min_rate)
2432                         vn_min_rate = DEF_MIN_RATE;
2433                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2434                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2435         }
2436         DP(NETIF_MSG_IFUP,
2437            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2438            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2439
2440         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2441         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2442
2443         /* global vn counter - maximal Mbps for this vn */
2444         m_rs_vn.vn_counter.rate = vn_max_rate;
2445
2446         /* quota - number of bytes transmitted in this period */
2447         m_rs_vn.vn_counter.quota =
2448                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2449
2450         if (bp->vn_weight_sum) {
2451                 /* credit for each period of the fairness algorithm:
2452                    number of bytes in T_FAIR (the vns share the port rate).
2453                    vn_weight_sum should not be larger than 10000, thus
2454                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2455                    than zero */
2456                 m_fair_vn.vn_credit_delta =
2457                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2458                                                    (8 * bp->vn_weight_sum))),
2459                               (bp->cmng.fair_vars.fair_threshold * 2));
2460                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2461                    m_fair_vn.vn_credit_delta);
2462         }
2463
2464         /* Store it to internal memory */
2465         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2466                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2467                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2468                        ((u32 *)(&m_rs_vn))[i]);
2469
2470         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2471                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2472                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2473                        ((u32 *)(&m_fair_vn))[i]);
2474 }
2475
2476
2477 /* This function is called upon link interrupt */
2478 static void bnx2x_link_attn(struct bnx2x *bp)
2479 {
2480         u32 prev_link_status = bp->link_vars.link_status;
2481         /* Make sure that we are synced with the current statistics */
2482         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2483
2484         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2485
2486         if (bp->link_vars.link_up) {
2487
2488                 /* dropless flow control */
2489                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2490                         int port = BP_PORT(bp);
2491                         u32 pause_enabled = 0;
2492
2493                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2494                                 pause_enabled = 1;
2495
2496                         REG_WR(bp, BAR_USTRORM_INTMEM +
2497                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2498                                pause_enabled);
2499                 }
2500
2501                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2502                         struct host_port_stats *pstats;
2503
2504                         pstats = bnx2x_sp(bp, port_stats);
2505                         /* reset old bmac stats */
2506                         memset(&(pstats->mac_stx[0]), 0,
2507                                sizeof(struct mac_stx));
2508                 }
2509                 if (bp->state == BNX2X_STATE_OPEN)
2510                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2511         }
2512
2513         /* indicate link status only if link status actually changed */
2514         if (prev_link_status != bp->link_vars.link_status)
2515                 bnx2x_link_report(bp);
2516
2517         if (IS_E1HMF(bp)) {
2518                 int port = BP_PORT(bp);
2519                 int func;
2520                 int vn;
2521
2522                 /* Set the attention towards other drivers on the same port */
2523                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2524                         if (vn == BP_E1HVN(bp))
2525                                 continue;
2526
2527                         func = ((vn << 1) | port);
2528                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2529                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2530                 }
2531
2532                 if (bp->link_vars.link_up) {
2533                         int i;
2534
2535                         /* Init rate shaping and fairness contexts */
2536                         bnx2x_init_port_minmax(bp);
2537
2538                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2539                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2540
2541                         /* Store it to internal memory */
2542                         for (i = 0;
2543                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2544                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2545                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2546                                        ((u32 *)(&bp->cmng))[i]);
2547                 }
2548         }
2549 }
2550
2551 static void bnx2x__link_status_update(struct bnx2x *bp)
2552 {
2553         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2554                 return;
2555
2556         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2557
2558         if (bp->link_vars.link_up)
2559                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2560         else
2561                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2562
2563         bnx2x_calc_vn_weight_sum(bp);
2564
2565         /* indicate link status */
2566         bnx2x_link_report(bp);
2567 }
2568
2569 static void bnx2x_pmf_update(struct bnx2x *bp)
2570 {
2571         int port = BP_PORT(bp);
2572         u32 val;
2573
2574         bp->port.pmf = 1;
2575         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2576
2577         /* enable nig attention */
2578         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2579         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2580         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2581
2582         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2583 }
2584
2585 /* end of Link */
2586
2587 /* slow path */
2588
2589 /*
2590  * General service functions
2591  */
2592
2593 /* send the MCP a request, block until there is a reply */
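/* Mailbox protocol: the command is OR'ed with an ever-increasing
 * sequence number and written to the driver mailbox header; the FW
 * acknowledges by echoing the same sequence number (the
 * FW_MSG_SEQ_NUMBER_MASK bits) in its own mailbox header, together
 * with the response code (the FW_MSG_CODE_MASK bits).
 */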
2594 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2595 {
2596         int func = BP_FUNC(bp);
2597         u32 seq = ++bp->fw_seq;
2598         u32 rc = 0;
2599         u32 cnt = 1;
2600         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2601
2602         mutex_lock(&bp->fw_mb_mutex);
2603         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2604         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2605
2606         do {
2607                 /* let the FW do its magic ... */
2608                 msleep(delay);
2609
2610                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2611
2612                 /* Give the FW up to 5 seconds (500*10ms) */
2613         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2614
2615         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2616            cnt*delay, rc, seq);
2617
2618         /* is this a reply to our command? */
2619         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2620                 rc &= FW_MSG_CODE_MASK;
2621         else {
2622                 /* FW BUG! */
2623                 BNX2X_ERR("FW failed to respond!\n");
2624                 bnx2x_fw_dump(bp);
2625                 rc = 0;
2626         }
2627         mutex_unlock(&bp->fw_mb_mutex);
2628
2629         return rc;
2630 }
2631
2632 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2633 static void bnx2x_set_rx_mode(struct net_device *dev);
2634
2635 static void bnx2x_e1h_disable(struct bnx2x *bp)
2636 {
2637         int port = BP_PORT(bp);
2638
2639         netif_tx_disable(bp->dev);
2640
2641         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2642
2643         netif_carrier_off(bp->dev);
2644 }
2645
2646 static void bnx2x_e1h_enable(struct bnx2x *bp)
2647 {
2648         int port = BP_PORT(bp);
2649
2650         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2651
2652         /* Tx queues should only be re-enabled */
2653         netif_tx_wake_all_queues(bp->dev);
2654
2655         /*
2656          * Should not call netif_carrier_on since it will be called if the link
2657          * is up when checking for link state
2658          */
2659 }
2660
2661 static void bnx2x_update_min_max(struct bnx2x *bp)
2662 {
2663         int port = BP_PORT(bp);
2664         int vn, i;
2665
2666         /* Init rate shaping and fairness contexts */
2667         bnx2x_init_port_minmax(bp);
2668
2669         bnx2x_calc_vn_weight_sum(bp);
2670
2671         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2672                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2673
2674         if (bp->port.pmf) {
2675                 int func;
2676
2677                 /* Set the attention towards other drivers on the same port */
2678                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2679                         if (vn == BP_E1HVN(bp))
2680                                 continue;
2681
2682                         func = ((vn << 1) | port);
2683                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2684                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2685                 }
2686
2687                 /* Store it to internal memory */
2688                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2689                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2690                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2691                                ((u32 *)(&bp->cmng))[i]);
2692         }
2693 }
2694
2695 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2696 {
2697         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2698
2699         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2700
2701                 /*
2702                  * This is the only place besides the function initialization
2703                  * where the bp->flags can change so it is done without any
2704                  * locks
2705                  */
2706                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2707                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2708                         bp->flags |= MF_FUNC_DIS;
2709
2710                         bnx2x_e1h_disable(bp);
2711                 } else {
2712                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2713                         bp->flags &= ~MF_FUNC_DIS;
2714
2715                         bnx2x_e1h_enable(bp);
2716                 }
2717                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2718         }
2719         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2720
2721                 bnx2x_update_min_max(bp);
2722                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2723         }
2724
2725         /* Report results to MCP */
2726         if (dcc_event)
2727                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2728         else
2729                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2730 }
2731
2732 /* must be called under the spq lock */
2733 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2734 {
2735         struct eth_spe *next_spe = bp->spq_prod_bd;
2736
2737         if (bp->spq_prod_bd == bp->spq_last_bd) {
2738                 bp->spq_prod_bd = bp->spq;
2739                 bp->spq_prod_idx = 0;
2740                 DP(NETIF_MSG_TIMER, "end of spq\n");
2741         } else {
2742                 bp->spq_prod_bd++;
2743                 bp->spq_prod_idx++;
2744         }
2745         return next_spe;
2746 }
2747
2748 /* must be called under the spq lock */
2749 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2750 {
2751         int func = BP_FUNC(bp);
2752
2753         /* Make sure that BD data is updated before writing the producer */
2754         wmb();
2755
2756         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2757                bp->spq_prod_idx);
2758         mmiowb();
2759 }
2760
2761 /* the slow path queue is odd since completions arrive on the fastpath ring */
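/* Slow path entries ("ramrods") carry control commands - MAC setup,
 * client setup/halt and so on - to the FW; their completions come back
 * as slow path CQEs on the fastpath completion ring, which is why
 * bnx2x_rx_int() checks CQE_TYPE() and dispatches to bnx2x_sp_event().
 */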
2762 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2763                          u32 data_hi, u32 data_lo, int common)
2764 {
2765         struct eth_spe *spe;
2766
2767 #ifdef BNX2X_STOP_ON_ERROR
2768         if (unlikely(bp->panic))
2769                 return -EIO;
2770 #endif
2771
2772         spin_lock_bh(&bp->spq_lock);
2773
2774         if (!bp->spq_left) {
2775                 BNX2X_ERR("BUG! SPQ ring full!\n");
2776                 spin_unlock_bh(&bp->spq_lock);
2777                 bnx2x_panic();
2778                 return -EBUSY;
2779         }
2780
2781         spe = bnx2x_sp_get_next(bp);
2782
2783         /* CID needs the port number to be encoded in it */
2784         spe->hdr.conn_and_cmd_data =
2785                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2786                                     HW_CID(bp, cid));
2787         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2788         if (common)
2789                 spe->hdr.type |=
2790                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2791
2792         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2793         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2794
2795         bp->spq_left--;
2796
2797         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2798            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2799            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2800            (u32)(U64_LO(bp->spq_mapping) +
2801            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2802            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2803
2804         bnx2x_sp_prod_update(bp);
2805         spin_unlock_bh(&bp->spq_lock);
2806         return 0;
2807 }
2808
2809 /* acquire split MCP access lock register */
2810 static int bnx2x_acquire_alr(struct bnx2x *bp)
2811 {
2812         u32 j, val;
2813         int rc = 0;
2814
2815         might_sleep();
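     /* Judging by the access pattern, bit 31 of the lock register is
      * the request/grant bit: write it, read back, and if it is still
      * set the lock was granted; otherwise retry (up to ~5 seconds).
      */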
2816         for (j = 0; j < 1000; j++) {
2817                 val = (1UL << 31);
2818                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2819                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2820                 if (val & (1L << 31))
2821                         break;
2822
2823                 msleep(5);
2824         }
2825         if (!(val & (1L << 31))) {
2826                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2827                 rc = -EBUSY;
2828         }
2829
2830         return rc;
2831 }
2832
2833 /* release split MCP access lock register */
2834 static void bnx2x_release_alr(struct bnx2x *bp)
2835 {
2836         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2837 }
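
/* Usage note (sketch): the register at GRCBASE_MCP + 0x9c acts as a
 * hardware test-and-set semaphore shared with the MCP firmware: writing
 * bit 31 requests the lock, reading it back confirms ownership and
 * writing 0 releases it.  Callers bracket shared attention handling
 * with bnx2x_acquire_alr()/bnx2x_release_alr(), as
 * bnx2x_attn_int_deasserted() does below.
 */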
2838
2839 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2840 {
2841         struct host_def_status_block *def_sb = bp->def_status_blk;
2842         u16 rc = 0;
2843
2844         barrier(); /* status block is written to by the chip */
2845         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2846                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2847                 rc |= 1;
2848         }
2849         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2850                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2851                 rc |= 2;
2852         }
2853         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2854                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2855                 rc |= 4;
2856         }
2857         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2858                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2859                 rc |= 8;
2860         }
2861         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2862                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2863                 rc |= 16;
2864         }
2865         return rc;
2866 }
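
/* Decoding sketch for the mask returned above: bit 0 - attention bits
 * index changed, bit 1 - CStorm, bit 2 - UStorm, bit 3 - XStorm,
 * bit 4 - TStorm default status block index changed.  bnx2x_sp_task()
 * below keys off bits 0 (HW attentions) and 1 (statistics completions).
 */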
2867
2868 /*
2869  * slow path service functions
2870  */
2871
2872 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2873 {
2874         int port = BP_PORT(bp);
2875         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2876                        COMMAND_REG_ATTN_BITS_SET);
2877         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2878                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2879         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2880                                        NIG_REG_MASK_INTERRUPT_PORT0;
2881         u32 aeu_mask;
2882         u32 nig_mask = 0;
2883
2884         if (bp->attn_state & asserted)
2885                 BNX2X_ERR("IGU ERROR\n");
2886
2887         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2888         aeu_mask = REG_RD(bp, aeu_addr);
2889
2890         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2891            aeu_mask, asserted);
2892         aeu_mask &= ~(asserted & 0x3ff);
2893         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2894
2895         REG_WR(bp, aeu_addr, aeu_mask);
2896         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2897
2898         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2899         bp->attn_state |= asserted;
2900         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2901
2902         if (asserted & ATTN_HARD_WIRED_MASK) {
2903                 if (asserted & ATTN_NIG_FOR_FUNC) {
2904
2905                         bnx2x_acquire_phy_lock(bp);
2906
2907                         /* save nig interrupt mask */
2908                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2909                         REG_WR(bp, nig_int_mask_addr, 0);
2910
2911                         bnx2x_link_attn(bp);
2912
2913                         /* handle unicore attn? */
2914                 }
2915                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2916                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2917
2918                 if (asserted & GPIO_2_FUNC)
2919                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2920
2921                 if (asserted & GPIO_3_FUNC)
2922                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2923
2924                 if (asserted & GPIO_4_FUNC)
2925                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2926
2927                 if (port == 0) {
2928                         if (asserted & ATTN_GENERAL_ATTN_1) {
2929                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2930                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2931                         }
2932                         if (asserted & ATTN_GENERAL_ATTN_2) {
2933                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2934                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2935                         }
2936                         if (asserted & ATTN_GENERAL_ATTN_3) {
2937                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2938                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2939                         }
2940                 } else {
2941                         if (asserted & ATTN_GENERAL_ATTN_4) {
2942                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2943                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2944                         }
2945                         if (asserted & ATTN_GENERAL_ATTN_5) {
2946                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2947                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2948                         }
2949                         if (asserted & ATTN_GENERAL_ATTN_6) {
2950                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2951                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2952                         }
2953                 }
2954
2955         } /* if hardwired */
2956
2957         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2958            asserted, hc_addr);
2959         REG_WR(bp, hc_addr, asserted);
2960
2961         /* now set back the mask */
2962         if (asserted & ATTN_NIG_FOR_FUNC) {
2963                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2964                 bnx2x_release_phy_lock(bp);
2965         }
2966 }
2967
2968 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2969 {
2970         int port = BP_PORT(bp);
2971
2972         /* mark the failure */
2973         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2974         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2975         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2976                  bp->link_params.ext_phy_config);
2977
2978         /* log the failure */
2979         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2980                " the driver to shut down the card to prevent permanent"
2981                " damage.  Please contact OEM Support for assistance\n");
2982 }
2983
2984 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2985 {
2986         int port = BP_PORT(bp);
2987         int reg_offset;
2988         u32 val, swap_val, swap_override;
2989
2990         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2991                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2992
2993         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2994
2995                 val = REG_RD(bp, reg_offset);
2996                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2997                 REG_WR(bp, reg_offset, val);
2998
2999                 BNX2X_ERR("SPIO5 hw attention\n");
3000
3001                 /* Fan failure attention */
3002                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
3003                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3004                         /* Low power mode is controlled by GPIO 2 */
3005                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3006                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007                         /* The PHY reset is controlled by GPIO 1 */
3008                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3009                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3010                         break;
3011
3012                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
3013                         /* The PHY reset is controlled by GPIO 1 */
3014                         /* fake the port number to cancel the swap done in
3015                            set_gpio() */
3016                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3017                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3018                         port = (swap_val && swap_override) ^ 1;
3019                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3020                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3021                         break;
3022
3023                 default:
3024                         break;
3025                 }
3026                 bnx2x_fan_failure(bp);
3027         }
3028
3029         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3030                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3031                 bnx2x_acquire_phy_lock(bp);
3032                 bnx2x_handle_module_detect_int(&bp->link_params);
3033                 bnx2x_release_phy_lock(bp);
3034         }
3035
3036         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3037
3038                 val = REG_RD(bp, reg_offset);
3039                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3040                 REG_WR(bp, reg_offset, val);
3041
3042                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3043                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3044                 bnx2x_panic();
3045         }
3046 }
3047
3048 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3049 {
3050         u32 val;
3051
3052         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3053
3054                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3055                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3056                 /* DORQ discard attention */
3057                 if (val & 0x2)
3058                         BNX2X_ERR("FATAL error from DORQ\n");
3059         }
3060
3061         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3062
3063                 int port = BP_PORT(bp);
3064                 int reg_offset;
3065
3066                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3067                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3068
3069                 val = REG_RD(bp, reg_offset);
3070                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3071                 REG_WR(bp, reg_offset, val);
3072
3073                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3074                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3075                 bnx2x_panic();
3076         }
3077 }
3078
3079 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3080 {
3081         u32 val;
3082
3083         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3084
3085                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3086                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3087                 /* CFC error attention */
3088                 if (val & 0x2)
3089                         BNX2X_ERR("FATAL error from CFC\n");
3090         }
3091
3092         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3093
3094                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3095                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3096                 /* RQ_USDMDP_FIFO_OVERFLOW */
3097                 if (val & 0x18000)
3098                         BNX2X_ERR("FATAL error from PXP\n");
3099         }
3100
3101         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3102
3103                 int port = BP_PORT(bp);
3104                 int reg_offset;
3105
3106                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3107                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3108
3109                 val = REG_RD(bp, reg_offset);
3110                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3111                 REG_WR(bp, reg_offset, val);
3112
3113                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3114                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3115                 bnx2x_panic();
3116         }
3117 }
3118
3119 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3120 {
3121         u32 val;
3122
3123         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3124
3125                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3126                         int func = BP_FUNC(bp);
3127
3128                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3129                         bp->mf_config = SHMEM_RD(bp,
3130                                            mf_cfg.func_mf_config[func].config);
3131                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3132                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3133                                 bnx2x_dcc_event(bp,
3134                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3135                         bnx2x__link_status_update(bp);
3136                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3137                                 bnx2x_pmf_update(bp);
3138
3139                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3140
3141                         BNX2X_ERR("MC assert!\n");
3142                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3143                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3144                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3145                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3146                         bnx2x_panic();
3147
3148                 } else if (attn & BNX2X_MCP_ASSERT) {
3149
3150                         BNX2X_ERR("MCP assert!\n");
3151                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3152                         bnx2x_fw_dump(bp);
3153
3154                 } else
3155                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3156         }
3157
3158         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3159                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3160                 if (attn & BNX2X_GRC_TIMEOUT) {
3161                         val = CHIP_IS_E1H(bp) ?
3162                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3163                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3164                 }
3165                 if (attn & BNX2X_GRC_RSV) {
3166                         val = CHIP_IS_E1H(bp) ?
3167                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3168                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3169                 }
3170                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3171         }
3172 }
3173
3174 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3175 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3176
3177
3178 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3179 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3180 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3181 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3182 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3183 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3184 /*
3185  * should be run under rtnl lock
3186  */
3187 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3188 {
3189         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3191         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3192         barrier();
3193         mmiowb();
3194 }
3195
3196 /*
3197  * should be run under rtnl lock
3198  */
3199 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3200 {
3201         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3202         val |= (1 << RESET_DONE_FLAG_SHIFT);
3203         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3204         barrier();
3205         mmiowb();
3206 }
3207
3208 /*
3209  * should be run under rtnl lock
3210  */
3211 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3212 {
3213         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3214         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3215         return !(val & RESET_DONE_FLAG_MASK);
3216 }
3217
3218 /*
3219  * should be run under rtnl lock
3220  */
3221 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3222 {
3223         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3224
3225         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3226
3227         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3228         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3229         barrier();
3230         mmiowb();
3231 }
3232
3233 /*
3234  * should be run under rtnl lock
3235  */
3236 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3237 {
3238         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3239
3240         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3241
3242         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3243         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3244         barrier();
3245         mmiowb();
3246
3247         return val1;
3248 }
3249
3250 /*
3251  * should be run under rtnl lock
3252  */
3253 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3254 {
3255         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3256 }
3257
3258 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3259 {
3260         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3261         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3262 }
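
/* Illustrative recovery sketch (assumption, not driver code): the
 * helpers above keep a per-chip count of loaded functions in the lower
 * LOAD_COUNTER_BITS of BNX2X_MISC_GEN_REG, with the reset-in-progress
 * flag above it, so a last-man-out global reset could be gated like:
 */
static inline void bnx2x_global_reset_sketch(struct bnx2x *bp)
{
	if (bnx2x_dec_load_cnt(bp) == 0) {
		/* last function standing owns the global reset */
		bnx2x_set_reset_in_progress(bp);
		/* ... the actual HW reset sequence would run here ... */
		bnx2x_set_reset_done(bp);
	}
}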
3263
3264 static inline void _print_next_block(int idx, const char *blk)
3265 {
3266         if (idx)
3267                 pr_cont(", ");
3268         pr_cont("%s", blk);
3269 }
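
/* Example (illustrative): with par_num threaded through the helpers
 * below, three flagged blocks come out as one continued line
 * "BRB, PARSER, TSDM" - the idx test suppresses the leading comma
 * before the first name.
 */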
3270
3271 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3272 {
3273         int i = 0;
3274         u32 cur_bit = 0;
3275         for (i = 0; sig; i++) {
3276                 cur_bit = ((u32)0x1 << i);
3277                 if (sig & cur_bit) {
3278                         switch (cur_bit) {
3279                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3280                                 _print_next_block(par_num++, "BRB");
3281                                 break;
3282                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3283                                 _print_next_block(par_num++, "PARSER");
3284                                 break;
3285                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3286                                 _print_next_block(par_num++, "TSDM");
3287                                 break;
3288                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3289                                 _print_next_block(par_num++, "SEARCHER");
3290                                 break;
3291                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3292                                 _print_next_block(par_num++, "TSEMI");
3293                                 break;
3294                         }
3295
3296                         /* Clear the bit */
3297                         sig &= ~cur_bit;
3298                 }
3299         }
3300
3301         return par_num;
3302 }
3303
3304 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3305 {
3306         int i = 0;
3307         u32 cur_bit = 0;
3308         for (i = 0; sig; i++) {
3309                 cur_bit = ((u32)0x1 << i);
3310                 if (sig & cur_bit) {
3311                         switch (cur_bit) {
3312                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3313                                 _print_next_block(par_num++, "PBCLIENT");
3314                                 break;
3315                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3316                                 _print_next_block(par_num++, "QM");
3317                                 break;
3318                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3319                                 _print_next_block(par_num++, "XSDM");
3320                                 break;
3321                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3322                                 _print_next_block(par_num++, "XSEMI");
3323                                 break;
3324                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3325                                 _print_next_block(par_num++, "DOORBELLQ");
3326                                 break;
3327                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3328                                 _print_next_block(par_num++, "VAUX PCI CORE");
3329                                 break;
3330                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3331                                 _print_next_block(par_num++, "DEBUG");
3332                                 break;
3333                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3334                                 _print_next_block(par_num++, "USDM");
3335                                 break;
3336                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3337                                 _print_next_block(par_num++, "USEMI");
3338                                 break;
3339                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3340                                 _print_next_block(par_num++, "UPB");
3341                                 break;
3342                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3343                                 _print_next_block(par_num++, "CSDM");
3344                                 break;
3345                         }
3346
3347                         /* Clear the bit */
3348                         sig &= ~cur_bit;
3349                 }
3350         }
3351
3352         return par_num;
3353 }
3354
3355 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3356 {
3357         int i = 0;
3358         u32 cur_bit = 0;
3359         for (i = 0; sig; i++) {
3360                 cur_bit = ((u32)0x1 << i);
3361                 if (sig & cur_bit) {
3362                         switch (cur_bit) {
3363                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3364                                 _print_next_block(par_num++, "CSEMI");
3365                                 break;
3366                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3367                                 _print_next_block(par_num++, "PXP");
3368                                 break;
3369                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3370                                 _print_next_block(par_num++,
3371                                         "PXPPCICLOCKCLIENT");
3372                                 break;
3373                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3374                                 _print_next_block(par_num++, "CFC");
3375                                 break;
3376                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3377                                 _print_next_block(par_num++, "CDU");
3378                                 break;
3379                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3380                                 _print_next_block(par_num++, "IGU");
3381                                 break;
3382                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3383                                 _print_next_block(par_num++, "MISC");
3384                                 break;
3385                         }
3386
3387                         /* Clear the bit */
3388                         sig &= ~cur_bit;
3389                 }
3390         }
3391
3392         return par_num;
3393 }
3394
3395 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3396 {
3397         int i = 0;
3398         u32 cur_bit = 0;
3399         for (i = 0; sig; i++) {
3400                 cur_bit = ((u32)0x1 << i);
3401                 if (sig & cur_bit) {
3402                         switch (cur_bit) {
3403                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3404                                 _print_next_block(par_num++, "MCP ROM");
3405                                 break;
3406                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3407                                 _print_next_block(par_num++, "MCP UMP RX");
3408                                 break;
3409                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3410                                 _print_next_block(par_num++, "MCP UMP TX");
3411                                 break;
3412                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3413                                 _print_next_block(par_num++, "MCP SCPAD");
3414                                 break;
3415                         }
3416
3417                         /* Clear the bit */
3418                         sig &= ~cur_bit;
3419                 }
3420         }
3421
3422         return par_num;
3423 }
3424
3425 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3426                                      u32 sig2, u32 sig3)
3427 {
3428         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3429             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3430                 int par_num = 0;
3431                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3432                         "[0]:0x%08x [1]:0x%08x "
3433                         "[2]:0x%08x [3]:0x%08x\n",
3434                           sig0 & HW_PRTY_ASSERT_SET_0,
3435                           sig1 & HW_PRTY_ASSERT_SET_1,
3436                           sig2 & HW_PRTY_ASSERT_SET_2,
3437                           sig3 & HW_PRTY_ASSERT_SET_3);
3438                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3439                        bp->dev->name);
3440                 par_num = bnx2x_print_blocks_with_parity0(
3441                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3442                 par_num = bnx2x_print_blocks_with_parity1(
3443                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3444                 par_num = bnx2x_print_blocks_with_parity2(
3445                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3446                 par_num = bnx2x_print_blocks_with_parity3(
3447                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3448                 pr_cont("\n");
3449                 return true;
3450         } else
3451                 return false;
3452 }
3453
3454 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3455 {
3456         struct attn_route attn;
3457         int port = BP_PORT(bp);
3458
3459         attn.sig[0] = REG_RD(bp,
3460                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3461                              port*4);
3462         attn.sig[1] = REG_RD(bp,
3463                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3464                              port*4);
3465         attn.sig[2] = REG_RD(bp,
3466                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3467                              port*4);
3468         attn.sig[3] = REG_RD(bp,
3469                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3470                              port*4);
3471
3472         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3473                                         attn.sig[3]);
3474 }
3475
3476 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3477 {
3478         struct attn_route attn, *group_mask;
3479         int port = BP_PORT(bp);
3480         int index;
3481         u32 reg_addr;
3482         u32 val;
3483         u32 aeu_mask;
3484
3485         /* need to take the HW lock because the MCP or the other port
3486            might also try to handle this event */
3487         bnx2x_acquire_alr(bp);
3488
3489         if (bnx2x_chk_parity_attn(bp)) {
3490                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3491                 bnx2x_set_reset_in_progress(bp);
3492                 schedule_delayed_work(&bp->reset_task, 0);
3493                 /* Disable HW interrupts */
3494                 bnx2x_int_disable(bp);
3495                 bnx2x_release_alr(bp);
3496                 /* In case of parity errors, don't handle the attentions so
3497                  * that the other function can also "see" them.
3498                  */
3499                 return;
3500         }
3501
3502         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3503         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3504         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3505         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3506         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3507            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3508
3509         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3510                 if (deasserted & (1 << index)) {
3511                         group_mask = &bp->attn_group[index];
3512
3513                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3514                            index, group_mask->sig[0], group_mask->sig[1],
3515                            group_mask->sig[2], group_mask->sig[3]);
3516
3517                         bnx2x_attn_int_deasserted3(bp,
3518                                         attn.sig[3] & group_mask->sig[3]);
3519                         bnx2x_attn_int_deasserted1(bp,
3520                                         attn.sig[1] & group_mask->sig[1]);
3521                         bnx2x_attn_int_deasserted2(bp,
3522                                         attn.sig[2] & group_mask->sig[2]);
3523                         bnx2x_attn_int_deasserted0(bp,
3524                                         attn.sig[0] & group_mask->sig[0]);
3525                 }
3526         }
3527
3528         bnx2x_release_alr(bp);
3529
3530         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3531
3532         val = ~deasserted;
3533         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3534            val, reg_addr);
3535         REG_WR(bp, reg_addr, val);
3536
3537         if (~bp->attn_state & deasserted)
3538                 BNX2X_ERR("IGU ERROR\n");
3539
3540         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3541                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3542
3543         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3544         aeu_mask = REG_RD(bp, reg_addr);
3545
3546         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3547            aeu_mask, deasserted);
3548         aeu_mask |= (deasserted & 0x3ff);
3549         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3550
3551         REG_WR(bp, reg_addr, aeu_mask);
3552         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3553
3554         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3555         bp->attn_state &= ~deasserted;
3556         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3557 }
3558
3559 static void bnx2x_attn_int(struct bnx2x *bp)
3560 {
3561         /* read local copy of bits */
3562         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3563                                                                 attn_bits);
3564         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3565                                                                 attn_bits_ack);
3566         u32 attn_state = bp->attn_state;
3567
3568         /* look for changed bits */
3569         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3570         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3571
3572         DP(NETIF_MSG_HW,
3573            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3574            attn_bits, attn_ack, asserted, deasserted);
3575
3576         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3577                 BNX2X_ERR("BAD attention state\n");
3578
3579         /* handle bits that were raised */
3580         if (asserted)
3581                 bnx2x_attn_int_asserted(bp, asserted);
3582
3583         if (deasserted)
3584                 bnx2x_attn_int_deasserted(bp, deasserted);
3585 }
3586
3587 static void bnx2x_sp_task(struct work_struct *work)
3588 {
3589         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3590         u16 status;
3591
3592         /* Return here if interrupt is disabled */
3593         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3594                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3595                 return;
3596         }
3597
3598         status = bnx2x_update_dsb_idx(bp);
3599 /*      if (status == 0)                                     */
3600 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3601
3602         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3603
3604         /* HW attentions */
3605         if (status & 0x1) {
3606                 bnx2x_attn_int(bp);
3607                 status &= ~0x1;
3608         }
3609
3610         /* CStorm events: STAT_QUERY */
3611         if (status & 0x2) {
3612                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3613                 status &= ~0x2;
3614         }
3615
3616         if (unlikely(status))
3617                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3618                    status);
3619
3620         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3621                      IGU_INT_NOP, 1);
3622         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3623                      IGU_INT_NOP, 1);
3624         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3625                      IGU_INT_NOP, 1);
3626         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3627                      IGU_INT_NOP, 1);
3628         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3629                      IGU_INT_ENABLE, 1);
3630 }
3631
3632 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3633 {
3634         struct net_device *dev = dev_instance;
3635         struct bnx2x *bp = netdev_priv(dev);
3636
3637         /* Return here if interrupt is disabled */
3638         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3639                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3640                 return IRQ_HANDLED;
3641         }
3642
3643         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3644
3645 #ifdef BNX2X_STOP_ON_ERROR
3646         if (unlikely(bp->panic))
3647                 return IRQ_HANDLED;
3648 #endif
3649
3650 #ifdef BCM_CNIC
3651         {
3652                 struct cnic_ops *c_ops;
3653
3654                 rcu_read_lock();
3655                 c_ops = rcu_dereference(bp->cnic_ops);
3656                 if (c_ops)
3657                         c_ops->cnic_handler(bp->cnic_data, NULL);
3658                 rcu_read_unlock();
3659         }
3660 #endif
3661         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3662
3663         return IRQ_HANDLED;
3664 }
3665
3666 /* end of slow path */
3667
3668 /* Statistics */
3669
3670 /****************************************************************************
3671 * Macros
3672 ****************************************************************************/
3673
3674 /* sum[hi:lo] += add[hi:lo] */
3675 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3676         do { \
3677                 s_lo += a_lo; \
3678                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3679         } while (0)
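
/* Worked example (sketch): ADD_64 detects low-dword wrap-around with
 * the unsigned compare (s_lo < a_lo) and carries into the high dword.
 */
static inline void bnx2x_add_64_example(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 1);
	/* s_lo wrapped to 0 and the carry made s_hi 1: 0x1:00000000 */
}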
3680
3681 /* difference = minuend - subtrahend */
3682 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3683         do { \
3684                 if (m_lo < s_lo) { \
3685                         /* underflow */ \
3686                         d_hi = m_hi - s_hi; \
3687                         if (d_hi > 0) { \
3688                                 /* we can 'loan' 1 */ \
3689                                 d_hi--; \
3690                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3691                         } else { \
3692                                 /* m_hi <= s_hi */ \
3693                                 d_hi = 0; \
3694                                 d_lo = 0; \
3695                         } \
3696                 } else { \
3697                         /* m_lo >= s_lo */ \
3698                         if (m_hi < s_hi) { \
3699                                 d_hi = 0; \
3700                                 d_lo = 0; \
3701                         } else { \
3702                                 /* m_hi >= s_hi */ \
3703                                 d_hi = m_hi - s_hi; \
3704                                 d_lo = m_lo - s_lo; \
3705                         } \
3706                 } \
3707         } while (0)
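
/* Worked example (sketch): DIFF_64 borrows from the high dword on
 * low-dword underflow and clamps to zero when the subtrahend exceeds
 * the minuend (hardware counters only move forward).
 */
static inline void bnx2x_diff_64_example(void)
{
	u32 d_hi, d_lo;

	/* 0x1:00000000 - 0x0:00000001 = 0x0:ffffffff */
	DIFF_64(d_hi, 1, 0, d_lo, 0, 1);
}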
3708
3709 #define UPDATE_STAT64(s, t) \
3710         do { \
3711                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3712                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3713                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3714                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3715                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3716                        pstats->mac_stx[1].t##_lo, diff.lo); \
3717         } while (0)
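
/* Semantics sketch: mac_stx[0] caches the most recent raw MAC snapshot
 * while mac_stx[1] accumulates the running totals; see
 * UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets) in
 * bnx2x_bmac_stats_update() below.
 */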
3718
3719 #define UPDATE_STAT64_NIG(s, t) \
3720         do { \
3721                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3722                         diff.lo, new->s##_lo, old->s##_lo); \
3723                 ADD_64(estats->t##_hi, diff.hi, \
3724                        estats->t##_lo, diff.lo); \
3725         } while (0)
3726
3727 /* sum[hi:lo] += add */
3728 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3729         do { \
3730                 s_lo += a; \
3731                 s_hi += (s_lo < a) ? 1 : 0; \
3732         } while (0)
3733
3734 #define UPDATE_EXTEND_STAT(s) \
3735         do { \
3736                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3737                               pstats->mac_stx[1].s##_lo, \
3738                               new->s); \
3739         } while (0)
3740
3741 #define UPDATE_EXTEND_TSTAT(s, t) \
3742         do { \
3743                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3744                 old_tclient->s = tclient->s; \
3745                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746         } while (0)
3747
3748 #define UPDATE_EXTEND_USTAT(s, t) \
3749         do { \
3750                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3751                 old_uclient->s = uclient->s; \
3752                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3753         } while (0)
3754
3755 #define UPDATE_EXTEND_XSTAT(s, t) \
3756         do { \
3757                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3758                 old_xclient->s = xclient->s; \
3759                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3760         } while (0)
3761
3762 /* minuend -= subtrahend */
3763 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3764         do { \
3765                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3766         } while (0)
3767
3768 /* minuend[hi:lo] -= subtrahend */
3769 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3770         do { \
3771                 SUB_64(m_hi, 0, m_lo, s); \
3772         } while (0)
3773
3774 #define SUB_EXTEND_USTAT(s, t) \
3775         do { \
3776                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3777                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3778         } while (0)
3779
3780 /*
3781  * General service functions
3782  */
3783
3784 static inline long bnx2x_hilo(u32 *hiref)
3785 {
3786         u32 lo = *(hiref + 1);
3787 #if (BITS_PER_LONG == 64)
3788         u32 hi = *hiref;
3789
3790         return HILO_U64(hi, lo);
3791 #else
3792         return lo;
3793 #endif
3794 }
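
/* Usage sketch (illustrative): the stats structures lay each 64-bit
 * counter out as adjacent _hi/_lo u32 fields, so callers pass the
 * address of the _hi dword; 32-bit hosts get only the low dword, e.g.
 *
 *	val = bnx2x_hilo(&pstats->mac_stx[1].rx_stat_ifhcinbadoctets_hi);
 */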
3795
3796 /*
3797  * Init service functions
3798  */
3799
3800 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3801 {
3802         if (!bp->stats_pending) {
3803                 struct eth_query_ramrod_data ramrod_data = {0};
3804                 int i, rc;
3805
3806                 ramrod_data.drv_counter = bp->stats_counter++;
3807                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3808                 for_each_queue(bp, i)
3809                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3810
3811                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3812                                    ((u32 *)&ramrod_data)[1],
3813                                    ((u32 *)&ramrod_data)[0], 0);
3814                 if (rc == 0) {
3815                         /* stats ramrod has its own slot on the spq */
3816                         bp->spq_left++;
3817                         bp->stats_pending = 1;
3818                 }
3819         }
3820 }
3821
3822 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3823 {
3824         struct dmae_command *dmae = &bp->stats_dmae;
3825         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3826
3827         *stats_comp = DMAE_COMP_VAL;
3828         if (CHIP_REV_IS_SLOW(bp))
3829                 return;
3830
3831         /* loader */
3832         if (bp->executer_idx) {
3833                 int loader_idx = PMF_DMAE_C(bp);
3834
3835                 memset(dmae, 0, sizeof(struct dmae_command));
3836
3837                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3838                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3839                                 DMAE_CMD_DST_RESET |
3840 #ifdef __BIG_ENDIAN
3841                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3842 #else
3843                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3844 #endif
3845                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3846                                                DMAE_CMD_PORT_0) |
3847                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3848                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3849                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3850                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3851                                      sizeof(struct dmae_command) *
3852                                      (loader_idx + 1)) >> 2;
3853                 dmae->dst_addr_hi = 0;
3854                 dmae->len = sizeof(struct dmae_command) >> 2;
3855                 if (CHIP_IS_E1(bp))
3856                         dmae->len--;
3857                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3858                 dmae->comp_addr_hi = 0;
3859                 dmae->comp_val = 1;
3860
3861                 *stats_comp = 0;
3862                 bnx2x_post_dmae(bp, dmae, loader_idx);
3863
3864         } else if (bp->func_stx) {
3865                 *stats_comp = 0;
3866                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3867         }
3868 }
3869
3870 static int bnx2x_stats_comp(struct bnx2x *bp)
3871 {
3872         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3873         int cnt = 10;
3874
3875         might_sleep();
3876         while (*stats_comp != DMAE_COMP_VAL) {
3877                 if (!cnt) {
3878                         BNX2X_ERR("timeout waiting for stats to finish\n");
3879                         break;
3880                 }
3881                 cnt--;
3882                 msleep(1);
3883         }
3884         return 1;
3885 }
3886
3887 /*
3888  * Statistics service functions
3889  */
3890
3891 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3892 {
3893         struct dmae_command *dmae;
3894         u32 opcode;
3895         int loader_idx = PMF_DMAE_C(bp);
3896         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3897
3898         /* sanity */
3899         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3900                 BNX2X_ERR("BUG!\n");
3901                 return;
3902         }
3903
3904         bp->executer_idx = 0;
3905
3906         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3907                   DMAE_CMD_C_ENABLE |
3908                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3909 #ifdef __BIG_ENDIAN
3910                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3911 #else
3912                   DMAE_CMD_ENDIANITY_DW_SWAP |
3913 #endif
3914                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3915                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3916
3917         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3918         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3919         dmae->src_addr_lo = bp->port.port_stx >> 2;
3920         dmae->src_addr_hi = 0;
3921         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3922         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3923         dmae->len = DMAE_LEN32_RD_MAX;
3924         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3925         dmae->comp_addr_hi = 0;
3926         dmae->comp_val = 1;
3927
3928         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3929         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3930         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3931         dmae->src_addr_hi = 0;
3932         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3933                                    DMAE_LEN32_RD_MAX * 4);
3934         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3935                                    DMAE_LEN32_RD_MAX * 4);
3936         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3937         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3938         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3939         dmae->comp_val = DMAE_COMP_VAL;
3940
3941         *stats_comp = 0;
3942         bnx2x_hw_stats_post(bp);
3943         bnx2x_stats_comp(bp);
3944 }
3945
3946 static void bnx2x_port_stats_init(struct bnx2x *bp)
3947 {
3948         struct dmae_command *dmae;
3949         int port = BP_PORT(bp);
3950         int vn = BP_E1HVN(bp);
3951         u32 opcode;
3952         int loader_idx = PMF_DMAE_C(bp);
3953         u32 mac_addr;
3954         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3955
3956         /* sanity */
3957         if (!bp->link_vars.link_up || !bp->port.pmf) {
3958                 BNX2X_ERR("BUG!\n");
3959                 return;
3960         }
3961
3962         bp->executer_idx = 0;
3963
3964         /* MCP */
3965         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3966                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3967                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3968 #ifdef __BIG_ENDIAN
3969                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3970 #else
3971                   DMAE_CMD_ENDIANITY_DW_SWAP |
3972 #endif
3973                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3974                   (vn << DMAE_CMD_E1HVN_SHIFT));
3975
3976         if (bp->port.port_stx) {
3977
3978                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979                 dmae->opcode = opcode;
3980                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3981                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3982                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3983                 dmae->dst_addr_hi = 0;
3984                 dmae->len = sizeof(struct host_port_stats) >> 2;
3985                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3986                 dmae->comp_addr_hi = 0;
3987                 dmae->comp_val = 1;
3988         }
3989
3990         if (bp->func_stx) {
3991
3992                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3993                 dmae->opcode = opcode;
3994                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3995                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3996                 dmae->dst_addr_lo = bp->func_stx >> 2;
3997                 dmae->dst_addr_hi = 0;
3998                 dmae->len = sizeof(struct host_func_stats) >> 2;
3999                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4000                 dmae->comp_addr_hi = 0;
4001                 dmae->comp_val = 1;
4002         }
4003
4004         /* MAC */
4005         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4006                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4007                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4008 #ifdef __BIG_ENDIAN
4009                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4010 #else
4011                   DMAE_CMD_ENDIANITY_DW_SWAP |
4012 #endif
4013                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4014                   (vn << DMAE_CMD_E1HVN_SHIFT));
4015
4016         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4017
4018                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4019                                    NIG_REG_INGRESS_BMAC0_MEM);
4020
4021                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4022                    BIGMAC_REGISTER_TX_STAT_GTBYT */
4023                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4024                 dmae->opcode = opcode;
4025                 dmae->src_addr_lo = (mac_addr +
4026                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4027                 dmae->src_addr_hi = 0;
4028                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4029                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4030                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4031                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4032                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4033                 dmae->comp_addr_hi = 0;
4034                 dmae->comp_val = 1;
4035
4036                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4037                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
4038                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4039                 dmae->opcode = opcode;
4040                 dmae->src_addr_lo = (mac_addr +
4041                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4042                 dmae->src_addr_hi = 0;
4043                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4044                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4045                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4046                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4047                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4048                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4049                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4050                 dmae->comp_addr_hi = 0;
4051                 dmae->comp_val = 1;
4052
4053         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4054
4055                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4056
4057                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4058                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4059                 dmae->opcode = opcode;
4060                 dmae->src_addr_lo = (mac_addr +
4061                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4062                 dmae->src_addr_hi = 0;
4063                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4064                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4065                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4066                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4067                 dmae->comp_addr_hi = 0;
4068                 dmae->comp_val = 1;
4069
4070                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4071                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4072                 dmae->opcode = opcode;
4073                 dmae->src_addr_lo = (mac_addr +
4074                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4075                 dmae->src_addr_hi = 0;
4076                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4077                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4078                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4079                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4080                 dmae->len = 1;
4081                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4082                 dmae->comp_addr_hi = 0;
4083                 dmae->comp_val = 1;
4084
4085                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4086                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4087                 dmae->opcode = opcode;
4088                 dmae->src_addr_lo = (mac_addr +
4089                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4090                 dmae->src_addr_hi = 0;
4091                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4092                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4093                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4094                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4095                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4096                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4097                 dmae->comp_addr_hi = 0;
4098                 dmae->comp_val = 1;
4099         }
4100
4101         /* NIG */
4102         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4103         dmae->opcode = opcode;
4104         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4105                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
4106         dmae->src_addr_hi = 0;
4107         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4108         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4109         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4110         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4111         dmae->comp_addr_hi = 0;
4112         dmae->comp_val = 1;
4113
4114         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4115         dmae->opcode = opcode;
4116         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4117                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4118         dmae->src_addr_hi = 0;
4119         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4120                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4121         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4122                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4123         dmae->len = (2*sizeof(u32)) >> 2;
4124         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4125         dmae->comp_addr_hi = 0;
4126         dmae->comp_val = 1;
4127
4128         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4129         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4130                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4131                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4132 #ifdef __BIG_ENDIAN
4133                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4134 #else
4135                         DMAE_CMD_ENDIANITY_DW_SWAP |
4136 #endif
4137                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4138                         (vn << DMAE_CMD_E1HVN_SHIFT));
4139         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4140                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4141         dmae->src_addr_hi = 0;
4142         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4143                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4144         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4145                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4146         dmae->len = (2*sizeof(u32)) >> 2;
4147         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4148         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4149         dmae->comp_val = DMAE_COMP_VAL;
4150
4151         *stats_comp = 0;
4152 }
4153
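     /* Prepare the single DMAE command that copies the driver's
      * host_func_stats block out to this function's statistics area
      * (bp->func_stx) in device memory; it is executed by the next
      * bnx2x_hw_stats_post().
      */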
4154 static void bnx2x_func_stats_init(struct bnx2x *bp)
4155 {
4156         struct dmae_command *dmae = &bp->stats_dmae;
4157         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4158
4159         /* sanity */
4160         if (!bp->func_stx) {
4161                 BNX2X_ERR("BUG!\n");
4162                 return;
4163         }
4164
4165         bp->executer_idx = 0;
4166         memset(dmae, 0, sizeof(struct dmae_command));
4167
4168         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4169                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4170                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4171 #ifdef __BIG_ENDIAN
4172                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4173 #else
4174                         DMAE_CMD_ENDIANITY_DW_SWAP |
4175 #endif
4176                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4177                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4178         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4179         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4180         dmae->dst_addr_lo = bp->func_stx >> 2;
4181         dmae->dst_addr_hi = 0;
4182         dmae->len = sizeof(struct host_func_stats) >> 2;
4183         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4184         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4185         dmae->comp_val = DMAE_COMP_VAL;
4186
4187         *stats_comp = 0;
4188 }
4189
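     /* Start a statistics cycle: the PMF programs the full per-port
      * DMAE chain, other functions program only their function block,
      * and then both the hardware DMAE and the storm firmware query
      * are posted.
      */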
4190 static void bnx2x_stats_start(struct bnx2x *bp)
4191 {
4192         if (bp->port.pmf)
4193                 bnx2x_port_stats_init(bp);
4194
4195         else if (bp->func_stx)
4196                 bnx2x_func_stats_init(bp);
4197
4198         bnx2x_hw_stats_post(bp);
4199         bnx2x_storm_stats_post(bp);
4200 }
4201
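     /* Small helpers for the state machine below: both wait out any
      * outstanding DMAE (bnx2x_stats_comp) before (re)starting a
      * collection cycle.
      */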
4202 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4203 {
4204         bnx2x_stats_comp(bp);
4205         bnx2x_stats_pmf_update(bp);
4206         bnx2x_stats_start(bp);
4207 }
4208
4209 static void bnx2x_stats_restart(struct bnx2x *bp)
4210 {
4211         bnx2x_stats_comp(bp);
4212         bnx2x_stats_start(bp);
4213 }
4214
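     /* Fold the freshly DMAE'd BMAC counters into host_port_stats.
      * UPDATE_STAT64 accumulates the delta from the previous hardware
      * snapshot into the running 64-bit totals in mac_stx[1], which
      * is read back below for the pause-frame counters.
      */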
4215 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4216 {
4217         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4218         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4219         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4220         struct {
4221                 u32 lo;
4222                 u32 hi;
4223         } diff;
4224
4225         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4226         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4227         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4228         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4229         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4230         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4231         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4232         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4233         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4234         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4235         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4236         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4237         UPDATE_STAT64(tx_stat_gt127,
4238                                 tx_stat_etherstatspkts65octetsto127octets);
4239         UPDATE_STAT64(tx_stat_gt255,
4240                                 tx_stat_etherstatspkts128octetsto255octets);
4241         UPDATE_STAT64(tx_stat_gt511,
4242                                 tx_stat_etherstatspkts256octetsto511octets);
4243         UPDATE_STAT64(tx_stat_gt1023,
4244                                 tx_stat_etherstatspkts512octetsto1023octets);
4245         UPDATE_STAT64(tx_stat_gt1518,
4246                                 tx_stat_etherstatspkts1024octetsto1522octets);
4247         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4248         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4249         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4250         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4251         UPDATE_STAT64(tx_stat_gterr,
4252                                 tx_stat_dot3statsinternalmactransmiterrors);
4253         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4254
4255         estats->pause_frames_received_hi =
4256                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4257         estats->pause_frames_received_lo =
4258                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4259
4260         estats->pause_frames_sent_hi =
4261                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4262         estats->pause_frames_sent_lo =
4263                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4264 }
4265
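     /* Same for the EMAC, whose counters are only 32 bits wide:
      * UPDATE_EXTEND_STAT widens them into the 64-bit mac_stx[1]
      * entries. Pause totals are the sum of the XON and XOFF frame
      * counters.
      */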
4266 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4267 {
4268         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4269         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4270         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4271
4272         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4273         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4274         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4275         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4276         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4277         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4278         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4279         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4280         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4281         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4282         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4283         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4284         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4285         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4286         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4287         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4288         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4289         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4290         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4291         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4292         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4293         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4294         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4295         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4296         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4297         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4298         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4299         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4300         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4301         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4302         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4303
4304         estats->pause_frames_received_hi =
4305                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4306         estats->pause_frames_received_lo =
4307                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4308         ADD_64(estats->pause_frames_received_hi,
4309                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4310                estats->pause_frames_received_lo,
4311                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4312
4313         estats->pause_frames_sent_hi =
4314                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
4315         estats->pause_frames_sent_lo =
4316                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
4317         ADD_64(estats->pause_frames_sent_hi,
4318                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4319                estats->pause_frames_sent_lo,
4320                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4321 }
4322
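     /* Digest the MAC and NIG counters that the DMAE chain just
      * pulled into host memory: dispatch to the active MAC's update
      * routine, then fold in the NIG discard/truncate and egress
      * packet statistics.
      */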
4323 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4324 {
4325         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4326         struct nig_stats *old = &(bp->port.old_nig_stats);
4327         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4328         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4329         struct {
4330                 u32 lo;
4331                 u32 hi;
4332         } diff;
4333
4334         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4335                 bnx2x_bmac_stats_update(bp);
4336
4337         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4338                 bnx2x_emac_stats_update(bp);
4339
4340         else { /* unreached */
4341                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4342                 return -1;
4343         }
4344
4345         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4346                       new->brb_discard - old->brb_discard);
4347         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4348                       new->brb_truncate - old->brb_truncate);
4349
4350         UPDATE_STAT64_NIG(egress_mac_pkt0,
4351                                         etherstatspkts1024octetsto1522octets);
4352         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4353
4354         memcpy(old, new, sizeof(struct nig_stats));
4355
4356         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4357                sizeof(struct mac_stx));
4358         estats->brb_drop_hi = pstats->brb_drop_hi;
4359         estats->brb_drop_lo = pstats->brb_drop_lo;
4360
4361         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4362
4363         if (!BP_NOMCP(bp)) {
4364                 u32 nig_timer_max =
4365                         SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4366                 if (nig_timer_max != estats->nig_timer_max) {
4367                         estats->nig_timer_max = nig_timer_max;
4368                         BNX2X_ERR("NIG timer max (%u)\n",
4369                                   estats->nig_timer_max);
4370                 }
4371         }
4372
4373         return 0;
4374 }
4375
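     /* Merge the per-client statistics returned by the storm
      * firmware. An answer is accepted only if each storm's counter
      * is exactly one behind bp->stats_counter, i.e. it matches the
      * most recent query.
      */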
4376 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4377 {
4378         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4379         struct tstorm_per_port_stats *tport =
4380                                         &stats->tstorm_common.port_statistics;
4381         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4382         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4383         int i;
4384
4385         memcpy(&(fstats->total_bytes_received_hi),
4386                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4387                sizeof(struct host_func_stats) - 2*sizeof(u32));
4388         estats->error_bytes_received_hi = 0;
4389         estats->error_bytes_received_lo = 0;
4390         estats->etherstatsoverrsizepkts_hi = 0;
4391         estats->etherstatsoverrsizepkts_lo = 0;
4392         estats->no_buff_discard_hi = 0;
4393         estats->no_buff_discard_lo = 0;
4394
4395         for_each_queue(bp, i) {
4396                 struct bnx2x_fastpath *fp = &bp->fp[i];
4397                 int cl_id = fp->cl_id;
4398                 struct tstorm_per_client_stats *tclient =
4399                                 &stats->tstorm_common.client_statistics[cl_id];
4400                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4401                 struct ustorm_per_client_stats *uclient =
4402                                 &stats->ustorm_common.client_statistics[cl_id];
4403                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4404                 struct xstorm_per_client_stats *xclient =
4405                                 &stats->xstorm_common.client_statistics[cl_id];
4406                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4407                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4408                 u32 diff;
4409
4410                 /* are storm stats valid? */
4411                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4412                                                         bp->stats_counter) {
4413                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4414                            "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
4415                            i, xclient->stats_counter, bp->stats_counter);
4416                         return -1;
4417                 }
4418                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4419                                                         bp->stats_counter) {
4420                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4421                            "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
4422                            i, tclient->stats_counter, bp->stats_counter);
4423                         return -2;
4424                 }
4425                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4426                                                         bp->stats_counter) {
4427                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4428                            "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
4429                            i, uclient->stats_counter, bp->stats_counter);
4430                         return -4;
4431                 }
4432
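                     /* Received bytes are summed per cast type and the
                      * no-buffer drops subtracted; "valid" bytes are
                      * snapshotted before the error bytes are added
                      * back into the total below.
                      */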
4433                 qstats->total_bytes_received_hi =
4434                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4435                 qstats->total_bytes_received_lo =
4436                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4437
4438                 ADD_64(qstats->total_bytes_received_hi,
4439                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4440                        qstats->total_bytes_received_lo,
4441                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4442
4443                 ADD_64(qstats->total_bytes_received_hi,
4444                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4445                        qstats->total_bytes_received_lo,
4446                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4447
4448                 SUB_64(qstats->total_bytes_received_hi,
4449                        le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4450                        qstats->total_bytes_received_lo,
4451                        le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4452
4453                 SUB_64(qstats->total_bytes_received_hi,
4454                        le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4455                        qstats->total_bytes_received_lo,
4456                        le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4457
4458                 SUB_64(qstats->total_bytes_received_hi,
4459                        le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4460                        qstats->total_bytes_received_lo,
4461                        le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4462
4463                 qstats->valid_bytes_received_hi =
4464                                         qstats->total_bytes_received_hi;
4465                 qstats->valid_bytes_received_lo =
4466                                         qstats->total_bytes_received_lo;
4467
4468                 qstats->error_bytes_received_hi =
4469                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4470                 qstats->error_bytes_received_lo =
4471                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4472
4473                 ADD_64(qstats->total_bytes_received_hi,
4474                        qstats->error_bytes_received_hi,
4475                        qstats->total_bytes_received_lo,
4476                        qstats->error_bytes_received_lo);
4477
4478                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4479                                         total_unicast_packets_received);
4480                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4481                                         total_multicast_packets_received);
4482                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4483                                         total_broadcast_packets_received);
4484                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4485                                         etherstatsoverrsizepkts);
4486                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4487
4488                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4489                                         total_unicast_packets_received);
4490                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4491                                         total_multicast_packets_received);
4492                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4493                                         total_broadcast_packets_received);
4494                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4495                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4496                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4497
4498                 qstats->total_bytes_transmitted_hi =
4499                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4500                 qstats->total_bytes_transmitted_lo =
4501                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4502
4503                 ADD_64(qstats->total_bytes_transmitted_hi,
4504                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4505                        qstats->total_bytes_transmitted_lo,
4506                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4507
4508                 ADD_64(qstats->total_bytes_transmitted_hi,
4509                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4510                        qstats->total_bytes_transmitted_lo,
4511                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4512
4513                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4514                                         total_unicast_packets_transmitted);
4515                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4516                                         total_multicast_packets_transmitted);
4517                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4518                                         total_broadcast_packets_transmitted);
4519
4520                 old_tclient->checksum_discard = tclient->checksum_discard;
4521                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4522
4523                 ADD_64(fstats->total_bytes_received_hi,
4524                        qstats->total_bytes_received_hi,
4525                        fstats->total_bytes_received_lo,
4526                        qstats->total_bytes_received_lo);
4527                 ADD_64(fstats->total_bytes_transmitted_hi,
4528                        qstats->total_bytes_transmitted_hi,
4529                        fstats->total_bytes_transmitted_lo,
4530                        qstats->total_bytes_transmitted_lo);
4531                 ADD_64(fstats->total_unicast_packets_received_hi,
4532                        qstats->total_unicast_packets_received_hi,
4533                        fstats->total_unicast_packets_received_lo,
4534                        qstats->total_unicast_packets_received_lo);
4535                 ADD_64(fstats->total_multicast_packets_received_hi,
4536                        qstats->total_multicast_packets_received_hi,
4537                        fstats->total_multicast_packets_received_lo,
4538                        qstats->total_multicast_packets_received_lo);
4539                 ADD_64(fstats->total_broadcast_packets_received_hi,
4540                        qstats->total_broadcast_packets_received_hi,
4541                        fstats->total_broadcast_packets_received_lo,
4542                        qstats->total_broadcast_packets_received_lo);
4543                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4544                        qstats->total_unicast_packets_transmitted_hi,
4545                        fstats->total_unicast_packets_transmitted_lo,
4546                        qstats->total_unicast_packets_transmitted_lo);
4547                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4548                        qstats->total_multicast_packets_transmitted_hi,
4549                        fstats->total_multicast_packets_transmitted_lo,
4550                        qstats->total_multicast_packets_transmitted_lo);
4551                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4552                        qstats->total_broadcast_packets_transmitted_hi,
4553                        fstats->total_broadcast_packets_transmitted_lo,
4554                        qstats->total_broadcast_packets_transmitted_lo);
4555                 ADD_64(fstats->valid_bytes_received_hi,
4556                        qstats->valid_bytes_received_hi,
4557                        fstats->valid_bytes_received_lo,
4558                        qstats->valid_bytes_received_lo);
4559
4560                 ADD_64(estats->error_bytes_received_hi,
4561                        qstats->error_bytes_received_hi,
4562                        estats->error_bytes_received_lo,
4563                        qstats->error_bytes_received_lo);
4564                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4565                        qstats->etherstatsoverrsizepkts_hi,
4566                        estats->etherstatsoverrsizepkts_lo,
4567                        qstats->etherstatsoverrsizepkts_lo);
4568                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4569                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4570         }
4571
4572         ADD_64(fstats->total_bytes_received_hi,
4573                estats->rx_stat_ifhcinbadoctets_hi,
4574                fstats->total_bytes_received_lo,
4575                estats->rx_stat_ifhcinbadoctets_lo);
4576
4577         memcpy(estats, &(fstats->total_bytes_received_hi),
4578                sizeof(struct host_func_stats) - 2*sizeof(u32));
4579
4580         ADD_64(estats->etherstatsoverrsizepkts_hi,
4581                estats->rx_stat_dot3statsframestoolong_hi,
4582                estats->etherstatsoverrsizepkts_lo,
4583                estats->rx_stat_dot3statsframestoolong_lo);
4584         ADD_64(estats->error_bytes_received_hi,
4585                estats->rx_stat_ifhcinbadoctets_hi,
4586                estats->error_bytes_received_lo,
4587                estats->rx_stat_ifhcinbadoctets_lo);
4588
4589         if (bp->port.pmf) {
4590                 estats->mac_filter_discard =
4591                                 le32_to_cpu(tport->mac_filter_discard);
4592                 estats->xxoverflow_discard =
4593                                 le32_to_cpu(tport->xxoverflow_discard);
4594                 estats->brb_truncate_discard =
4595                                 le32_to_cpu(tport->brb_truncate_discard);
4596                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4597         }
4598
4599         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4600
4601         bp->stats_pending = 0;
4602
4603         return 0;
4604 }
4605
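     /* Fill the generic struct net_device_stats from the driver's
      * 64-bit accumulators; bnx2x_hilo() collapses an adjacent hi/lo
      * pair into a single value.
      */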
4606 static void bnx2x_net_stats_update(struct bnx2x *bp)
4607 {
4608         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4609         struct net_device_stats *nstats = &bp->dev->stats;
4610         int i;
4611
4612         nstats->rx_packets =
4613                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4614                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4615                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4616
4617         nstats->tx_packets =
4618                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4619                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4620                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4621
4622         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4623
4624         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4625
4626         nstats->rx_dropped = estats->mac_discard;
4627         for_each_queue(bp, i)
4628                 nstats->rx_dropped +=
4629                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4630
4631         nstats->tx_dropped = 0;
4632
4633         nstats->multicast =
4634                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4635
4636         nstats->collisions =
4637                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4638
4639         nstats->rx_length_errors =
4640                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4641                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4642         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4643                                  bnx2x_hilo(&estats->brb_truncate_hi);
4644         nstats->rx_crc_errors =
4645                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4646         nstats->rx_frame_errors =
4647                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4648         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4649         nstats->rx_missed_errors = estats->xxoverflow_discard;
4650
4651         nstats->rx_errors = nstats->rx_length_errors +
4652                             nstats->rx_over_errors +
4653                             nstats->rx_crc_errors +
4654                             nstats->rx_frame_errors +
4655                             nstats->rx_fifo_errors +
4656                             nstats->rx_missed_errors;
4657
4658         nstats->tx_aborted_errors =
4659                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4660                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4661         nstats->tx_carrier_errors =
4662                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4663         nstats->tx_fifo_errors = 0;
4664         nstats->tx_heartbeat_errors = 0;
4665         nstats->tx_window_errors = 0;
4666
4667         nstats->tx_errors = nstats->tx_aborted_errors +
4668                             nstats->tx_carrier_errors +
4669             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4670 }
4671
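     /* Purely software counters (Xoff events, skb allocation
      * failures, checksum errors) are kept per queue; sum them into
      * the global bp->eth_stats.
      */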
4672 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4673 {
4674         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4675         int i;
4676
4677         estats->driver_xoff = 0;
4678         estats->rx_err_discard_pkt = 0;
4679         estats->rx_skb_alloc_failed = 0;
4680         estats->hw_csum_err = 0;
4681         for_each_queue(bp, i) {
4682                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4683
4684                 estats->driver_xoff += qstats->driver_xoff;
4685                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4686                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4687                 estats->hw_csum_err += qstats->hw_csum_err;
4688         }
4689 }
4690
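     /* UPDATE event handler: does nothing until the previous DMAE
      * chain has signalled completion, then refreshes every derived
      * statistic and immediately posts the next round.
      */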
4691 static void bnx2x_stats_update(struct bnx2x *bp)
4692 {
4693         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4694
4695         if (*stats_comp != DMAE_COMP_VAL)
4696                 return;
4697
4698         if (bp->port.pmf)
4699                 bnx2x_hw_stats_update(bp);
4700
4701         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4702                 BNX2X_ERR("storm stats were not updated for 3 times\n");
4703                 bnx2x_panic();
4704                 return;
4705         }
4706
4707         bnx2x_net_stats_update(bp);
4708         bnx2x_drv_stats_update(bp);
4709
4710         if (netif_msg_timer(bp)) {
4711                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4712                 int i;
4713
4714                 printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
4715                        bp->dev->name,
4716                        estats->brb_drop_lo, estats->brb_truncate_lo);
4717
4718                 for_each_queue(bp, i) {
4719                         struct bnx2x_fastpath *fp = &bp->fp[i];
4720                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4721
4722                         printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
4723                                           "  rx pkt(%lu)  rx calls(%lu %lu)\n",
4724                                fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4725                                fp->rx_comp_cons),
4726                                le16_to_cpu(*fp->rx_cons_sb),
4727                                bnx2x_hilo(&qstats->
4728                                           total_unicast_packets_received_hi),
4729                                fp->rx_calls, fp->rx_pkt);
4730                 }
4731
4732                 for_each_queue(bp, i) {
4733                         struct bnx2x_fastpath *fp = &bp->fp[i];
4734                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4735                         struct netdev_queue *txq =
4736                                 netdev_get_tx_queue(bp->dev, i);
4737
4738                         printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
4739                                           "  tx pkt(%lu) tx calls (%lu)"
4740                                           "  %s (Xoff events %u)\n",
4741                                fp->name, bnx2x_tx_avail(fp),
4742                                le16_to_cpu(*fp->tx_cons_sb),
4743                                bnx2x_hilo(&qstats->
4744                                           total_unicast_packets_transmitted_hi),
4745                                fp->tx_pkt,
4746                                (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4747                                qstats->driver_xoff);
4748                 }
4749         }
4750
4751         bnx2x_hw_stats_post(bp);
4752         bnx2x_storm_stats_post(bp);
4753 }
4754
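     /* On the way down, flush the final port/function statistics out
      * to their shared-memory areas so the management firmware keeps
      * consistent counters after the driver unloads.
      */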
4755 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4756 {
4757         struct dmae_command *dmae;
4758         u32 opcode;
4759         int loader_idx = PMF_DMAE_C(bp);
4760         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4761
4762         bp->executer_idx = 0;
4763
4764         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4765                   DMAE_CMD_C_ENABLE |
4766                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4767 #ifdef __BIG_ENDIAN
4768                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4769 #else
4770                   DMAE_CMD_ENDIANITY_DW_SWAP |
4771 #endif
4772                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4773                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4774
4775         if (bp->port.port_stx) {
4776
4777                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4778                 if (bp->func_stx)
4779                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4780                 else
4781                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4782                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4783                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4784                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4785                 dmae->dst_addr_hi = 0;
4786                 dmae->len = sizeof(struct host_port_stats) >> 2;
4787                 if (bp->func_stx) {
4788                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4789                         dmae->comp_addr_hi = 0;
4790                         dmae->comp_val = 1;
4791                 } else {
4792                         dmae->comp_addr_lo =
4793                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4794                         dmae->comp_addr_hi =
4795                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4796                         dmae->comp_val = DMAE_COMP_VAL;
4797
4798                         *stats_comp = 0;
4799                 }
4800         }
4801
4802         if (bp->func_stx) {
4803
4804                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4805                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4806                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4807                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4808                 dmae->dst_addr_lo = bp->func_stx >> 2;
4809                 dmae->dst_addr_hi = 0;
4810                 dmae->len = sizeof(struct host_func_stats) >> 2;
4811                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4812                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4813                 dmae->comp_val = DMAE_COMP_VAL;
4814
4815                 *stats_comp = 0;
4816         }
4817 }
4818
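     /* STOP event: wait out any outstanding DMAE, take one last
      * snapshot if either update succeeds, and (as PMF) push the
      * final port statistics out via bnx2x_port_stats_stop().
      */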
4819 static void bnx2x_stats_stop(struct bnx2x *bp)
4820 {
4821         int update = 0;
4822
4823         bnx2x_stats_comp(bp);
4824
4825         if (bp->port.pmf)
4826                 update = (bnx2x_hw_stats_update(bp) == 0);
4827
4828         update |= (bnx2x_storm_stats_update(bp) == 0);
4829
4830         if (update) {
4831                 bnx2x_net_stats_update(bp);
4832
4833                 if (bp->port.pmf)
4834                         bnx2x_port_stats_stop(bp);
4835
4836                 bnx2x_hw_stats_post(bp);
4837                 bnx2x_stats_comp(bp);
4838         }
4839 }
4840
4841 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4842 {
4843 }
4844
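     /* Statistics state machine: indexed by [current state][event];
      * each entry names the action to run and the next state.
      */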
4845 static const struct {
4846         void (*action)(struct bnx2x *bp);
4847         enum bnx2x_stats_state next_state;
4848 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4849 /* state        event   */
4850 {
4851 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4852 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4853 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4854 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4855 },
4856 {
4857 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4858 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4859 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4860 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4861 }
4862 };
4863
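     /* Single entry point for statistics events; drives the state
      * machine above.
      */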
4864 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4865 {
4866         enum bnx2x_stats_state state = bp->stats_state;
4867
4868         if (unlikely(bp->panic))
4869                 return;
4870
4871         bnx2x_stats_stm[state][event].action(bp);
4872         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4873
4874         /* Make sure the state change is visible before we continue */
4875         smp_wmb();
4876
4877         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4878                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4879                    state, event, bp->stats_state);
4880 }
4881
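     /* PMF only: write the driver's host_port_stats block once to the
      * port statistics area in shared memory before regular
      * collection starts.
      */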
4882 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4883 {
4884         struct dmae_command *dmae;
4885         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4886
4887         /* sanity */
4888         if (!bp->port.pmf || !bp->port.port_stx) {
4889                 BNX2X_ERR("BUG!\n");
4890                 return;
4891         }
4892
4893         bp->executer_idx = 0;
4894
4895         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4896         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4897                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4898                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4899 #ifdef __BIG_ENDIAN
4900                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4901 #else
4902                         DMAE_CMD_ENDIANITY_DW_SWAP |
4903 #endif
4904                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4905                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4906         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4907         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4908         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4909         dmae->dst_addr_hi = 0;
4910         dmae->len = sizeof(struct host_port_stats) >> 2;
4911         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4912         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4913         dmae->comp_val = DMAE_COMP_VAL;
4914
4915         *stats_comp = 0;
4916         bnx2x_hw_stats_post(bp);
4917         bnx2x_stats_comp(bp);
4918 }
4919
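     /* As PMF, initialize the function statistics area of every vnic
      * sharing this port, temporarily pointing bp->func_stx at each
      * one in turn.
      */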
4920 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4921 {
4922         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4923         int port = BP_PORT(bp);
4924         int func;
4925         u32 func_stx;
4926
4927         /* sanity */
4928         if (!bp->port.pmf || !bp->func_stx) {
4929                 BNX2X_ERR("BUG!\n");
4930                 return;
4931         }
4932
4933         /* save our func_stx */
4934         func_stx = bp->func_stx;
4935
4936         for (vn = VN_0; vn < vn_max; vn++) {
4937                 func = 2*vn + port;
4938
4939                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4940                 bnx2x_func_stats_init(bp);
4941                 bnx2x_hw_stats_post(bp);
4942                 bnx2x_stats_comp(bp);
4943         }
4944
4945         /* restore our func_stx */
4946         bp->func_stx = func_stx;
4947 }
4948
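     /* Non-PMF path: read the function statistics block back from
      * device memory into func_stats_base, so new totals accumulate
      * on top of whatever a previous driver instance left there.
      */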
4949 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4950 {
4951         struct dmae_command *dmae = &bp->stats_dmae;
4952         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4953
4954         /* sanity */
4955         if (!bp->func_stx) {
4956                 BNX2X_ERR("BUG!\n");
4957                 return;
4958         }
4959
4960         bp->executer_idx = 0;
4961         memset(dmae, 0, sizeof(struct dmae_command));
4962
4963         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4964                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4965                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4966 #ifdef __BIG_ENDIAN
4967                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4968 #else
4969                         DMAE_CMD_ENDIANITY_DW_SWAP |
4970 #endif
4971                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4972                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4973         dmae->src_addr_lo = bp->func_stx >> 2;
4974         dmae->src_addr_hi = 0;
4975         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4976         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4977         dmae->len = sizeof(struct host_func_stats) >> 2;
4978         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4979         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4980         dmae->comp_val = DMAE_COMP_VAL;
4981
4982         *stats_comp = 0;
4983         bnx2x_hw_stats_post(bp);
4984         bnx2x_stats_comp(bp);
4985 }
4986
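     /* One-time statistics setup at load: discover the management
      * statistics areas in shared memory, snapshot the current NIG
      * counters as the "old" baseline, and zero all software
      * accumulators.
      */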
4987 static void bnx2x_stats_init(struct bnx2x *bp)
4988 {
4989         int port = BP_PORT(bp);
4990         int func = BP_FUNC(bp);
4991         int i;
4992
4993         bp->stats_pending = 0;
4994         bp->executer_idx = 0;
4995         bp->stats_counter = 0;
4996
4997         /* port and func stats for management */
4998         if (!BP_NOMCP(bp)) {
4999                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
5000                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5001
5002         } else {
5003                 bp->port.port_stx = 0;
5004                 bp->func_stx = 0;
5005         }
5006         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
5007            bp->port.port_stx, bp->func_stx);
5008
5009         /* port stats */
5010         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
5011         bp->port.old_nig_stats.brb_discard =
5012                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
5013         bp->port.old_nig_stats.brb_truncate =
5014                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
5015         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
5016                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
5017         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
5018                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5019
5020         /* function stats */
5021         for_each_queue(bp, i) {
5022                 struct bnx2x_fastpath *fp = &bp->fp[i];
5023
5024                 memset(&fp->old_tclient, 0,
5025                        sizeof(struct tstorm_per_client_stats));
5026                 memset(&fp->old_uclient, 0,
5027                        sizeof(struct ustorm_per_client_stats));
5028                 memset(&fp->old_xclient, 0,
5029                        sizeof(struct xstorm_per_client_stats));
5030                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5031         }
5032
5033         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5034         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5035
5036         bp->stats_state = STATS_STATE_DISABLED;
5037
5038         if (bp->port.pmf) {
5039                 if (bp->port.port_stx)
5040                         bnx2x_port_stats_base_init(bp);
5041
5042                 if (bp->func_stx)
5043                         bnx2x_func_stats_base_init(bp);
5044
5045         } else if (bp->func_stx)
5046                 bnx2x_func_stats_base_update(bp);
5047 }
5048
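     /* Periodic driver timer: services queue 0 when running in poll
      * mode, keeps the driver<->MCP pulse handshake alive, and kicks
      * a statistics update while the device is open.
      */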
5049 static void bnx2x_timer(unsigned long data)
5050 {
5051         struct bnx2x *bp = (struct bnx2x *) data;
5052
5053         if (!netif_running(bp->dev))
5054                 return;
5055
5056         if (atomic_read(&bp->intr_sem) != 0)
5057                 goto timer_restart;
5058
5059         if (poll) {
5060                 struct bnx2x_fastpath *fp = &bp->fp[0];
5061                 int rc;
5062
5063                 bnx2x_tx_int(fp);
5064                 rc = bnx2x_rx_int(fp, 1000);
5065         }
5066
5067         if (!BP_NOMCP(bp)) {
5068                 int func = BP_FUNC(bp);
5069                 u32 drv_pulse;
5070                 u32 mcp_pulse;
5071
5072                 ++bp->fw_drv_pulse_wr_seq;
5073                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5074                 /* TBD - add SYSTEM_TIME */
5075                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5076                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5077
5078                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5079                              MCP_PULSE_SEQ_MASK);
5080                 /* The delta between driver pulse and mcp response
5081                  * should be 1 (before mcp response) or 0 (after mcp response)
5082                  */
5083                 if ((drv_pulse != mcp_pulse) &&
5084                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5085                         /* someone lost a heartbeat... */
5086                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5087                                   drv_pulse, mcp_pulse);
5088                 }
5089         }
5090
5091         if (bp->state == BNX2X_STATE_OPEN)
5092                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5093
5094 timer_restart:
5095         mod_timer(&bp->timer, jiffies + bp->current_interval);
5096 }
5097
5098 /* end of Statistics */
5099
5100 /* nic init */
5101
5102 /*
5103  * nic init service functions
5104  */
5105
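     /* Zero a queue's status block image; both the USTORM- and
      * CSTORM-indexed halves live in CSTORM internal memory.
      */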
5106 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5107 {
5108         int port = BP_PORT(bp);
5109
5110         /* "CSTORM" */
5111         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5112                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5113                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5114         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5115                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5116                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5117 }
5118
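     /* Attach a host status block to the chip: program its DMA
      * address and owning function for the USTORM and CSTORM
      * sections, disable host coalescing on every index, then enable
      * the IGU interrupt for this status block.
      */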
5119 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5120                           dma_addr_t mapping, int sb_id)
5121 {
5122         int port = BP_PORT(bp);
5123         int func = BP_FUNC(bp);
5124         int index;
5125         u64 section;
5126
5127         /* USTORM */
5128         section = ((u64)mapping) + offsetof(struct host_status_block,
5129                                             u_status_block);
5130         sb->u_status_block.status_block_id = sb_id;
5131
5132         REG_WR(bp, BAR_CSTRORM_INTMEM +
5133                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5134         REG_WR(bp, BAR_CSTRORM_INTMEM +
5135                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5136                U64_HI(section));
5137         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5138                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5139
5140         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5141                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5142                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5143
5144         /* CSTORM */
5145         section = ((u64)mapping) + offsetof(struct host_status_block,
5146                                             c_status_block);
5147         sb->c_status_block.status_block_id = sb_id;
5148
5149         REG_WR(bp, BAR_CSTRORM_INTMEM +
5150                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5151         REG_WR(bp, BAR_CSTRORM_INTMEM +
5152                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5153                U64_HI(section));
5154         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5155                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5156
5157         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5158                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5159                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5160
5161         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5162 }
5163
5164 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5165 {
5166         int func = BP_FUNC(bp);
5167
5168         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5169                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5170                         sizeof(struct tstorm_def_status_block)/4);
5171         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5172                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5173                         sizeof(struct cstorm_def_status_block_u)/4);
5174         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5175                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5176                         sizeof(struct cstorm_def_status_block_c)/4);
5177         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5178                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5179                         sizeof(struct xstorm_def_status_block)/4);
5180 }
5181
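     /* The default status block carries slowpath events: attentions
      * plus one section per storm. Cache the four AEU signal masks of
      * each dynamic attention group, then register every section's
      * address and owning function with its storm.
      */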
5182 static void bnx2x_init_def_sb(struct bnx2x *bp,
5183                               struct host_def_status_block *def_sb,
5184                               dma_addr_t mapping, int sb_id)
5185 {
5186         int port = BP_PORT(bp);
5187         int func = BP_FUNC(bp);
5188         int index, val, reg_offset;
5189         u64 section;
5190
5191         /* ATTN */
5192         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5193                                             atten_status_block);
5194         def_sb->atten_status_block.status_block_id = sb_id;
5195
5196         bp->attn_state = 0;
5197
5198         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5199                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5200
5201         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5202                 bp->attn_group[index].sig[0] = REG_RD(bp,
5203                                                      reg_offset + 0x10*index);
5204                 bp->attn_group[index].sig[1] = REG_RD(bp,
5205                                                reg_offset + 0x4 + 0x10*index);
5206                 bp->attn_group[index].sig[2] = REG_RD(bp,
5207                                                reg_offset + 0x8 + 0x10*index);
5208                 bp->attn_group[index].sig[3] = REG_RD(bp,
5209                                                reg_offset + 0xc + 0x10*index);
5210         }
5211
5212         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5213                              HC_REG_ATTN_MSG0_ADDR_L);
5214
5215         REG_WR(bp, reg_offset, U64_LO(section));
5216         REG_WR(bp, reg_offset + 4, U64_HI(section));
5217
5218         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5219
5220         val = REG_RD(bp, reg_offset);
5221         val |= sb_id;
5222         REG_WR(bp, reg_offset, val);
5223
5224         /* USTORM */
5225         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5226                                             u_def_status_block);
5227         def_sb->u_def_status_block.status_block_id = sb_id;
5228
5229         REG_WR(bp, BAR_CSTRORM_INTMEM +
5230                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5231         REG_WR(bp, BAR_CSTRORM_INTMEM +
5232                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5233                U64_HI(section));
5234         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5235                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5236
5237         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5238                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5239                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5240
5241         /* CSTORM */
5242         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5243                                             c_def_status_block);
5244         def_sb->c_def_status_block.status_block_id = sb_id;
5245
5246         REG_WR(bp, BAR_CSTRORM_INTMEM +
5247                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5248         REG_WR(bp, BAR_CSTRORM_INTMEM +
5249                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5250                U64_HI(section));
5251         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5252                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5253
5254         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5255                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5256                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5257
5258         /* TSTORM */
5259         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5260                                             t_def_status_block);
5261         def_sb->t_def_status_block.status_block_id = sb_id;
5262
5263         REG_WR(bp, BAR_TSTRORM_INTMEM +
5264                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5265         REG_WR(bp, BAR_TSTRORM_INTMEM +
5266                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5267                U64_HI(section));
5268         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5269                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5270
5271         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5272                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5273                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5274
5275         /* XSTORM */
5276         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5277                                             x_def_status_block);
5278         def_sb->x_def_status_block.status_block_id = sb_id;
5279
5280         REG_WR(bp, BAR_XSTRORM_INTMEM +
5281                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5282         REG_WR(bp, BAR_XSTRORM_INTMEM +
5283                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5284                U64_HI(section));
5285         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5286                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5287
5288         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5289                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5290                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5291
5292         bp->stats_pending = 0;
5293         bp->set_mac_pending = 0;
5294
5295         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5296 }
5297
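     /* Program rx/tx interrupt coalescing: the timeout registers are
      * written as rx_ticks/tx_ticks scaled down by 4*BNX2X_BTR, and a
      * scaled value of zero disables host coalescing on that index
      * instead.
      */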
5298 static void bnx2x_update_coalesce(struct bnx2x *bp)
5299 {
5300         int port = BP_PORT(bp);
5301         int i;
5302
5303         for_each_queue(bp, i) {
5304                 int sb_id = bp->fp[i].sb_id;
5305
5306                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5307                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5308                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5309                                                       U_SB_ETH_RX_CQ_INDEX),
5310                         bp->rx_ticks/(4 * BNX2X_BTR));
5311                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5312                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5313                                                        U_SB_ETH_RX_CQ_INDEX),
5314                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5315
5316                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5317                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5318                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5319                                                       C_SB_ETH_TX_CQ_INDEX),
5320                         bp->tx_ticks/(4 * BNX2X_BTR));
5321                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5322                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5323                                                        C_SB_ETH_TX_CQ_INDEX),
5324                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5325         }
5326 }
5327
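     /* Free the first 'last' TPA bins of a queue; a buffer is only
      * DMA-unmapped if aggregation had actually been started on it.
      */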
5328 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5329                                        struct bnx2x_fastpath *fp, int last)
5330 {
5331         int i;
5332
5333         for (i = 0; i < last; i++) {
5334                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5335                 struct sk_buff *skb = rx_buf->skb;
5336
5337                 if (skb == NULL) {
5338                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5339                         continue;
5340                 }
5341
5342                 if (fp->tpa_state[i] == BNX2X_TPA_START)
5343                         dma_unmap_single(&bp->pdev->dev,
5344                                          dma_unmap_addr(rx_buf, mapping),
5345                                          bp->rx_buf_size, DMA_FROM_DEVICE);
5346
5347                 dev_kfree_skb(skb);
5348                 rx_buf->skb = NULL;
5349         }
5350 }
5351
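     /* Build all rx rings: pre-allocate the TPA pools (falling back
      * to TPA-off on allocation failure), then chain the last element
      * of each SGE, BD and CQ page to the next page.
      */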
5352 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5353 {
5354         int func = BP_FUNC(bp);
5355         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5356                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
5357         u16 ring_prod, cqe_ring_prod;
5358         int i, j;
5359
5360         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5361         DP(NETIF_MSG_IFUP,
5362            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5363
5364         if (bp->flags & TPA_ENABLE_FLAG) {
5365
5366                 for_each_queue(bp, j) {
5367                         struct bnx2x_fastpath *fp = &bp->fp[j];
5368
5369                         for (i = 0; i < max_agg_queues; i++) {
5370                                 fp->tpa_pool[i].skb =
5371                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5372                                 if (!fp->tpa_pool[i].skb) {
5373                                         BNX2X_ERR("Failed to allocate TPA "
5374                                                   "skb pool for queue[%d] - "
5375                                                   "disabling TPA on this "
5376                                                   "queue!\n", j);
5377                                         bnx2x_free_tpa_pool(bp, fp, i);
5378                                         fp->disable_tpa = 1;
5379                                         break;
5380                                 }
5381                                 dma_unmap_addr_set(&fp->tpa_pool[i],
5382                                                    mapping, 0);
5384                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
5385                         }
5386                 }
5387         }
5388
5389         for_each_queue(bp, j) {
5390                 struct bnx2x_fastpath *fp = &bp->fp[j];
5391
5392                 fp->rx_bd_cons = 0;
5393                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5394                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5395
5396                 /* "next page" elements initialization */
5397                 /* SGE ring */
5398                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5399                         struct eth_rx_sge *sge;
5400
5401                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5402                         sge->addr_hi =
5403                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5404                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5405                         sge->addr_lo =
5406                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5407                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5408                 }
5409
5410                 bnx2x_init_sge_ring_bit_mask(fp);
5411
5412                 /* RX BD ring */
5413                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5414                         struct eth_rx_bd *rx_bd;
5415
5416                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5417                         rx_bd->addr_hi =
5418                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5419                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5420                         rx_bd->addr_lo =
5421                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5422                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5423                 }
5424
5425                 /* CQ ring */
5426                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5427                         struct eth_rx_cqe_next_page *nextpg;
5428
5429                         nextpg = (struct eth_rx_cqe_next_page *)
5430                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5431                         nextpg->addr_hi =
5432                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5433                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5434                         nextpg->addr_lo =
5435                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5436                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5437                 }
5438
5439                 /* Allocate SGEs and initialize the ring elements */
5440                 for (i = 0, ring_prod = 0;
5441                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5442
5443                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5444                                 BNX2X_ERR("was only able to allocate "
5445                                           "%d rx sges\n", i);
5446                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5447                                 /* Cleanup already allocated elements */
5448                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5449                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5450                                 fp->disable_tpa = 1;
5451                                 ring_prod = 0;
5452                                 break;
5453                         }
5454                         ring_prod = NEXT_SGE_IDX(ring_prod);
5455                 }
5456                 fp->rx_sge_prod = ring_prod;
5457
5458                 /* Allocate BDs and initialize BD ring */
5459                 fp->rx_comp_cons = 0;
5460                 cqe_ring_prod = ring_prod = 0;
5461                 for (i = 0; i < bp->rx_ring_size; i++) {
5462                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5463                                 BNX2X_ERR("was only able to allocate "
5464                                           "%d rx skbs on queue[%d]\n", i, j);
5465                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5466                                 break;
5467                         }
5468                         ring_prod = NEXT_RX_IDX(ring_prod);
5469                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5470                         WARN_ON(ring_prod <= i);
5471                 }
5472
5473                 fp->rx_bd_prod = ring_prod;
5474                 /* must not have more available CQEs than BDs */
5475                 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5476                                          cqe_ring_prod);
5477                 fp->rx_pkt = fp->rx_calls = 0;
5478
5479                 /* Warning!
5480                  * This will generate an interrupt (to the TSTORM);
5481                  * it must only be done after the chip is initialized.
5482                  */
5483                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5484                                      fp->rx_sge_prod);
5485                 if (j != 0)
5486                         continue;
5487
5488                 REG_WR(bp, BAR_USTRORM_INTMEM +
5489                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5490                        U64_LO(fp->rx_comp_mapping));
5491                 REG_WR(bp, BAR_USTRORM_INTMEM +
5492                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5493                        U64_HI(fp->rx_comp_mapping));
5494         }
5495 }
5496
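/* Chain each Tx ring page to the next via its "next BD" element and
 * reset the doorbell data and all Tx producer/consumer indices.
 */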
5497 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5498 {
5499         int i, j;
5500
5501         for_each_queue(bp, j) {
5502                 struct bnx2x_fastpath *fp = &bp->fp[j];
5503
5504                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5505                         struct eth_tx_next_bd *tx_next_bd =
5506                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5507
5508                         tx_next_bd->addr_hi =
5509                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5510                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5511                         tx_next_bd->addr_lo =
5512                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5513                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5514                 }
5515
5516                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5517                 fp->tx_db.data.zero_fill1 = 0;
5518                 fp->tx_db.data.prod = 0;
5519
5520                 fp->tx_pkt_prod = 0;
5521                 fp->tx_pkt_cons = 0;
5522                 fp->tx_bd_prod = 0;
5523                 fp->tx_bd_cons = 0;
5524                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5525                 fp->tx_pkt = 0;
5526         }
5527 }
5528
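/* Reset the slowpath queue (SPQ) state and point the XSTORM at the SPQ
 * page base and initial producer index.
 */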
5529 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5530 {
5531         int func = BP_FUNC(bp);
5532
5533         spin_lock_init(&bp->spq_lock);
5534
5535         bp->spq_left = MAX_SPQ_PENDING;
5536         bp->spq_prod_idx = 0;
5537         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5538         bp->spq_prod_bd = bp->spq;
5539         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5540
5541         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5542                U64_LO(bp->spq_mapping));
5543         REG_WR(bp,
5544                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5545                U64_HI(bp->spq_mapping));
5546
5547         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5548                bp->spq_prod_idx);
5549 }
5550
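/* Fill the per-connection Eth context: the USTORM part gets the Rx
 * BD/SGE page bases, buffer sizes and TPA parameters; the CSTORM and
 * XSTORM parts get the Tx CQ index, status block id, Tx BD page base
 * and statistics settings, plus the reserved CDU values.
 */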
5551 static void bnx2x_init_context(struct bnx2x *bp)
5552 {
5553         int i;
5554
5555         /* Rx */
5556         for_each_queue(bp, i) {
5557                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5558                 struct bnx2x_fastpath *fp = &bp->fp[i];
5559                 u8 cl_id = fp->cl_id;
5560
5561                 context->ustorm_st_context.common.sb_index_numbers =
5562                                                 BNX2X_RX_SB_INDEX_NUM;
5563                 context->ustorm_st_context.common.clientId = cl_id;
5564                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5565                 context->ustorm_st_context.common.flags =
5566                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5567                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5568                 context->ustorm_st_context.common.statistics_counter_id =
5569                                                 cl_id;
5570                 context->ustorm_st_context.common.mc_alignment_log_size =
5571                                                 BNX2X_RX_ALIGN_SHIFT;
5572                 context->ustorm_st_context.common.bd_buff_size =
5573                                                 bp->rx_buf_size;
5574                 context->ustorm_st_context.common.bd_page_base_hi =
5575                                                 U64_HI(fp->rx_desc_mapping);
5576                 context->ustorm_st_context.common.bd_page_base_lo =
5577                                                 U64_LO(fp->rx_desc_mapping);
5578                 if (!fp->disable_tpa) {
5579                         context->ustorm_st_context.common.flags |=
5580                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5581                         context->ustorm_st_context.common.sge_buff_size =
5582                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5583                                            0xffff);
5584                         context->ustorm_st_context.common.sge_page_base_hi =
5585                                                 U64_HI(fp->rx_sge_mapping);
5586                         context->ustorm_st_context.common.sge_page_base_lo =
5587                                                 U64_LO(fp->rx_sge_mapping);
5588
5589                         context->ustorm_st_context.common.max_sges_for_packet =
5590                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5591                         context->ustorm_st_context.common.max_sges_for_packet =
5592                                 ((context->ustorm_st_context.common.
5593                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5594                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5595                 }
5596
5597                 context->ustorm_ag_context.cdu_usage =
5598                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5599                                                CDU_REGION_NUMBER_UCM_AG,
5600                                                ETH_CONNECTION_TYPE);
5601
5602                 context->xstorm_ag_context.cdu_reserved =
5603                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5604                                                CDU_REGION_NUMBER_XCM_AG,
5605                                                ETH_CONNECTION_TYPE);
5606         }
5607
5608         /* Tx */
5609         for_each_queue(bp, i) {
5610                 struct bnx2x_fastpath *fp = &bp->fp[i];
5611                 struct eth_context *context =
5612                         bnx2x_sp(bp, context[i].eth);
5613
5614                 context->cstorm_st_context.sb_index_number =
5615                                                 C_SB_ETH_TX_CQ_INDEX;
5616                 context->cstorm_st_context.status_block_id = fp->sb_id;
5617
5618                 context->xstorm_st_context.tx_bd_page_base_hi =
5619                                                 U64_HI(fp->tx_desc_mapping);
5620                 context->xstorm_st_context.tx_bd_page_base_lo =
5621                                                 U64_LO(fp->tx_desc_mapping);
5622                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5623                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5624         }
5625 }
5626
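/* Program the TSTORM RSS indirection table, spreading its entries over
 * the client ids of the active queues in round-robin order.
 */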
5627 static void bnx2x_init_ind_table(struct bnx2x *bp)
5628 {
5629         int func = BP_FUNC(bp);
5630         int i;
5631
5632         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5633                 return;
5634
5635         DP(NETIF_MSG_IFUP,
5636            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5637         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5638                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5639                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5640                         bp->fp->cl_id + (i % bp->num_queues));
5641 }
5642
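/* Write the per-client TSTORM configuration (MTU, statistics counter,
 * optional VLAN tag removal) for every queue's client id.
 */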
5643 static void bnx2x_set_client_config(struct bnx2x *bp)
5644 {
5645         struct tstorm_eth_client_config tstorm_client = {0};
5646         int port = BP_PORT(bp);
5647         int i;
5648
5649         tstorm_client.mtu = bp->dev->mtu;
5650         tstorm_client.config_flags =
5651                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5652                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5653 #ifdef BCM_VLAN
5654         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5655                 tstorm_client.config_flags |=
5656                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5657                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5658         }
5659 #endif
5660
5661         for_each_queue(bp, i) {
5662                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5663
5664                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5665                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5666                        ((u32 *)&tstorm_client)[0]);
5667                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5668                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5669                        ((u32 *)&tstorm_client)[1]);
5670         }
5671
5672         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5673            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5674 }
5675
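/* Translate the driver Rx mode into TSTORM accept/drop-all filters and
 * a matching NIG LLH mask, then refresh the per-client configuration
 * unless Rx is disabled altogether.
 */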
5676 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5677 {
5678         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5679         int mode = bp->rx_mode;
5680         int mask = bp->rx_mode_cl_mask;
5681         int func = BP_FUNC(bp);
5682         int port = BP_PORT(bp);
5683         int i;
5684         /* All but management unicast packets should pass to the host as well */
5685         u32 llh_mask =
5686                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5687                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5688                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5689                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5690
5691         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5692
5693         switch (mode) {
5694         case BNX2X_RX_MODE_NONE: /* no Rx */
5695                 tstorm_mac_filter.ucast_drop_all = mask;
5696                 tstorm_mac_filter.mcast_drop_all = mask;
5697                 tstorm_mac_filter.bcast_drop_all = mask;
5698                 break;
5699
5700         case BNX2X_RX_MODE_NORMAL:
5701                 tstorm_mac_filter.bcast_accept_all = mask;
5702                 break;
5703
5704         case BNX2X_RX_MODE_ALLMULTI:
5705                 tstorm_mac_filter.mcast_accept_all = mask;
5706                 tstorm_mac_filter.bcast_accept_all = mask;
5707                 break;
5708
5709         case BNX2X_RX_MODE_PROMISC:
5710                 tstorm_mac_filter.ucast_accept_all = mask;
5711                 tstorm_mac_filter.mcast_accept_all = mask;
5712                 tstorm_mac_filter.bcast_accept_all = mask;
5713                 /* pass management unicast packets as well */
5714                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5715                 break;
5716
5717         default:
5718                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5719                 break;
5720         }
5721
5722         REG_WR(bp,
5723                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5724                llh_mask);
5725
5726         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5727                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5728                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5729                        ((u32 *)&tstorm_mac_filter)[i]);
5730
5731 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5732                    ((u32 *)&tstorm_mac_filter)[i]); */
5733         }
5734
5735         if (mode != BNX2X_RX_MODE_NONE)
5736                 bnx2x_set_client_config(bp);
5737 }
5738
5739 static void bnx2x_init_internal_common(struct bnx2x *bp)
5740 {
5741         int i;
5742
5743         /* Zero this manually as its initialization is
5744            currently missing in the initTool */
5745         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5746                 REG_WR(bp, BAR_USTRORM_INTMEM +
5747                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5748 }
5749
5750 static void bnx2x_init_internal_port(struct bnx2x *bp)
5751 {
5752         int port = BP_PORT(bp);
5753
5754         REG_WR(bp,
5755                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5756         REG_WR(bp,
5757                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5758         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5759         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5760 }
5761
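/* Per-function internal memory init: RSS/TPA configuration, initial Rx
 * mode, per-client statistics reset, statistics query addresses, CQE
 * page bases and aggregation sizes, dropless flow control thresholds
 * (E1H only) and the min-max (rate shaping/fairness) setup in MF mode.
 */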
5762 static void bnx2x_init_internal_func(struct bnx2x *bp)
5763 {
5764         struct tstorm_eth_function_common_config tstorm_config = {0};
5765         struct stats_indication_flags stats_flags = {0};
5766         int port = BP_PORT(bp);
5767         int func = BP_FUNC(bp);
5768         int i, j;
5769         u32 offset;
5770         u16 max_agg_size;
5771
5772         tstorm_config.config_flags = RSS_FLAGS(bp);
5773
5774         if (is_multi(bp))
5775                 tstorm_config.rss_result_mask = MULTI_MASK;
5776
5777         /* Enable TPA if needed */
5778         if (bp->flags & TPA_ENABLE_FLAG)
5779                 tstorm_config.config_flags |=
5780                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5781
5782         if (IS_E1HMF(bp))
5783                 tstorm_config.config_flags |=
5784                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5785
5786         tstorm_config.leading_client_id = BP_L_ID(bp);
5787
5788         REG_WR(bp, BAR_TSTRORM_INTMEM +
5789                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5790                (*(u32 *)&tstorm_config));
5791
5792         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5793         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5794         bnx2x_set_storm_rx_mode(bp);
5795
5796         for_each_queue(bp, i) {
5797                 u8 cl_id = bp->fp[i].cl_id;
5798
5799                 /* reset xstorm per client statistics */
5800                 offset = BAR_XSTRORM_INTMEM +
5801                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5802                 for (j = 0;
5803                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5804                         REG_WR(bp, offset + j*4, 0);
5805
5806                 /* reset tstorm per client statistics */
5807                 offset = BAR_TSTRORM_INTMEM +
5808                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5809                 for (j = 0;
5810                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5811                         REG_WR(bp, offset + j*4, 0);
5812
5813                 /* reset ustorm per client statistics */
5814                 offset = BAR_USTRORM_INTMEM +
5815                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5816                 for (j = 0;
5817                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5818                         REG_WR(bp, offset + j*4, 0);
5819         }
5820
5821         /* Init statistics related context */
5822         stats_flags.collect_eth = 1;
5823
5824         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5825                ((u32 *)&stats_flags)[0]);
5826         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5827                ((u32 *)&stats_flags)[1]);
5828
5829         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5830                ((u32 *)&stats_flags)[0]);
5831         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5832                ((u32 *)&stats_flags)[1]);
5833
5834         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5835                ((u32 *)&stats_flags)[0]);
5836         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5837                ((u32 *)&stats_flags)[1]);
5838
5839         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5840                ((u32 *)&stats_flags)[0]);
5841         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5842                ((u32 *)&stats_flags)[1]);
5843
5844         REG_WR(bp, BAR_XSTRORM_INTMEM +
5845                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5846                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5847         REG_WR(bp, BAR_XSTRORM_INTMEM +
5848                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5849                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5850
5851         REG_WR(bp, BAR_TSTRORM_INTMEM +
5852                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5853                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5854         REG_WR(bp, BAR_TSTRORM_INTMEM +
5855                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5856                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5857
5858         REG_WR(bp, BAR_USTRORM_INTMEM +
5859                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5860                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5861         REG_WR(bp, BAR_USTRORM_INTMEM +
5862                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5863                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5864
5865         if (CHIP_IS_E1H(bp)) {
5866                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5867                         IS_E1HMF(bp));
5868                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5869                         IS_E1HMF(bp));
5870                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5871                         IS_E1HMF(bp));
5872                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5873                         IS_E1HMF(bp));
5874
5875                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5876                          bp->e1hov);
5877         }
5878
5879         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5880         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5881                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5882         for_each_queue(bp, i) {
5883                 struct bnx2x_fastpath *fp = &bp->fp[i];
5884
5885                 REG_WR(bp, BAR_USTRORM_INTMEM +
5886                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5887                        U64_LO(fp->rx_comp_mapping));
5888                 REG_WR(bp, BAR_USTRORM_INTMEM +
5889                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5890                        U64_HI(fp->rx_comp_mapping));
5891
5892                 /* Next page */
5893                 REG_WR(bp, BAR_USTRORM_INTMEM +
5894                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5895                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5896                 REG_WR(bp, BAR_USTRORM_INTMEM +
5897                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5898                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5899
5900                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5901                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5902                          max_agg_size);
5903         }
5904
5905         /* dropless flow control */
5906         if (CHIP_IS_E1H(bp)) {
5907                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5908
5909                 rx_pause.bd_thr_low = 250;
5910                 rx_pause.cqe_thr_low = 250;
5911                 rx_pause.cos = 1;
5912                 rx_pause.sge_thr_low = 0;
5913                 rx_pause.bd_thr_high = 350;
5914                 rx_pause.cqe_thr_high = 350;
5915                 rx_pause.sge_thr_high = 0;
5916
5917                 for_each_queue(bp, i) {
5918                         struct bnx2x_fastpath *fp = &bp->fp[i];
5919
5920                         if (!fp->disable_tpa) {
5921                                 rx_pause.sge_thr_low = 150;
5922                                 rx_pause.sge_thr_high = 250;
5923                         }
5924
5926                         offset = BAR_USTRORM_INTMEM +
5927                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5928                                                                    fp->cl_id);
5929                         for (j = 0;
5930                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5931                              j++)
5932                                 REG_WR(bp, offset + j*4,
5933                                        ((u32 *)&rx_pause)[j]);
5934                 }
5935         }
5936
5937         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5938
5939         /* Init rate shaping and fairness contexts */
5940         if (IS_E1HMF(bp)) {
5941                 int vn;
5942
5943                 /* During init there is no active link;
5944                    until the link is up, set the link rate to 10Gbps */
5945                 bp->link_vars.line_speed = SPEED_10000;
5946                 bnx2x_init_port_minmax(bp);
5947
5948                 if (!BP_NOMCP(bp))
5949                         bp->mf_config =
5950                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5951                 bnx2x_calc_vn_weight_sum(bp);
5952
5953                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5954                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5955
5956                 /* Enable rate shaping and fairness */
5957                 bp->cmng.flags.cmng_enables |=
5958                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5959
5960         } else {
5961                 /* rate shaping and fairness are disabled */
5962                 DP(NETIF_MSG_IFUP,
5963                    "single function mode  minmax will be disabled\n");
5964         }
5965
5967         /* Store cmng structures to internal memory */
5968         if (bp->port.pmf)
5969                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5970                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5971                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5972                                ((u32 *)(&bp->cmng))[i]);
5973 }
5974
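/* Cascade the internal init stages: a COMMON load code falls through to
 * the PORT and FUNCTION stages, and a PORT load code to FUNCTION.
 */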
5975 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5976 {
5977         switch (load_code) {
5978         case FW_MSG_CODE_DRV_LOAD_COMMON:
5979                 bnx2x_init_internal_common(bp);
5980                 /* no break */
5981
5982         case FW_MSG_CODE_DRV_LOAD_PORT:
5983                 bnx2x_init_internal_port(bp);
5984                 /* no break */
5985
5986         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5987                 bnx2x_init_internal_func(bp);
5988                 break;
5989
5990         default:
5991                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5992                 break;
5993         }
5994 }
5995
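/* Top-level NIC init: set up the per-queue and default status blocks,
 * all rings, contexts and internal memories, then enable interrupts and
 * check the fan failure input (SPIO5) once.
 */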
5996 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5997 {
5998         int i;
5999
6000         for_each_queue(bp, i) {
6001                 struct bnx2x_fastpath *fp = &bp->fp[i];
6002
6003                 fp->bp = bp;
6004                 fp->state = BNX2X_FP_STATE_CLOSED;
6005                 fp->index = i;
6006                 fp->cl_id = BP_L_ID(bp) + i;
6007 #ifdef BCM_CNIC
6008                 fp->sb_id = fp->cl_id + 1;
6009 #else
6010                 fp->sb_id = fp->cl_id;
6011 #endif
6012                 DP(NETIF_MSG_IFUP,
6013                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
6014                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
6015                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
6016                               fp->sb_id);
6017                 bnx2x_update_fpsb_idx(fp);
6018         }
6019
6020         /* ensure status block indices were read */
6021         rmb();
6022
6024         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6025                           DEF_SB_ID);
6026         bnx2x_update_dsb_idx(bp);
6027         bnx2x_update_coalesce(bp);
6028         bnx2x_init_rx_rings(bp);
6029         bnx2x_init_tx_ring(bp);
6030         bnx2x_init_sp_ring(bp);
6031         bnx2x_init_context(bp);
6032         bnx2x_init_internal(bp, load_code);
6033         bnx2x_init_ind_table(bp);
6034         bnx2x_stats_init(bp);
6035
6036         /* At this point, we are ready for interrupts */
6037         atomic_set(&bp->intr_sem, 0);
6038
6039         /* flush all before enabling interrupts */
6040         mb();
6041         mmiowb();
6042
6043         bnx2x_int_enable(bp);
6044
6045         /* Check for SPIO5 */
6046         bnx2x_attn_int_deasserted0(bp,
6047                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6048                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6049 }
6050
6051 /* end of nic init */
6052
6053 /*
6054  * gzip service functions
6055  */
6056
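/* Allocate the DMA-coherent output buffer and the zlib stream (with its
 * inflate workspace) used for firmware decompression, unwinding all
 * allocations on failure.
 */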
6057 static int bnx2x_gunzip_init(struct bnx2x *bp)
6058 {
6059         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6060                                             &bp->gunzip_mapping, GFP_KERNEL);
6061         if (bp->gunzip_buf == NULL)
6062                 goto gunzip_nomem1;
6063
6064         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6065         if (bp->strm == NULL)
6066                 goto gunzip_nomem2;
6067
6068         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6069                                       GFP_KERNEL);
6070         if (bp->strm->workspace == NULL)
6071                 goto gunzip_nomem3;
6072
6073         return 0;
6074
6075 gunzip_nomem3:
6076         kfree(bp->strm);
6077         bp->strm = NULL;
6078
6079 gunzip_nomem2:
6080         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6081                           bp->gunzip_mapping);
6082         bp->gunzip_buf = NULL;
6083
6084 gunzip_nomem1:
6085         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6086                " decompression\n");
6087         return -ENOMEM;
6088 }
6089
6090 static void bnx2x_gunzip_end(struct bnx2x *bp)
6091 {
6092         kfree(bp->strm->workspace);
6093
6094         kfree(bp->strm);
6095         bp->strm = NULL;
6096
6097         if (bp->gunzip_buf) {
6098                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6099                                   bp->gunzip_mapping);
6100                 bp->gunzip_buf = NULL;
6101         }
6102 }
6103
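/* Inflate a gzip image into bp->gunzip_buf: check the fixed gzip header,
 * skip an optional NUL-terminated original-name (FNAME) field, then run
 * a raw inflate (-MAX_WBITS) over the remaining data.
 */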
6104 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6105 {
6106         int n, rc;
6107
6108         /* check gzip header */
6109         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6110                 BNX2X_ERR("Bad gzip header\n");
6111                 return -EINVAL;
6112         }
6113
6114         n = 10;
6115
6116 #define FNAME                           0x8
6117
6118         if (zbuf[3] & FNAME)
6119                 while ((zbuf[n++] != 0) && (n < len));
6120
6121         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6122         bp->strm->avail_in = len - n;
6123         bp->strm->next_out = bp->gunzip_buf;
6124         bp->strm->avail_out = FW_BUF_SIZE;
6125
6126         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6127         if (rc != Z_OK)
6128                 return rc;
6129
6130         rc = zlib_inflate(bp->strm, Z_FINISH);
6131         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6132                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6133                            bp->strm->msg);
6134
6135         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6136         if (bp->gunzip_outlen & 0x3)
6137                 netdev_err(bp->dev, "Firmware decompression error:"
6138                                     " gunzip_outlen (%d) not aligned\n",
6139                                 bp->gunzip_outlen);
6140         bp->gunzip_outlen >>= 2;
6141
6142         zlib_inflateEnd(bp->strm);
6143
6144         if (rc == Z_STREAM_END)
6145                 return 0;
6146
6147         return rc;
6148 }
6149
6150 /* nic load/unload */
6151
6152 /*
6153  * General service functions
6154  */
6155
6156 /* send a NIG loopback debug packet */
6157 static void bnx2x_lb_pckt(struct bnx2x *bp)
6158 {
6159         u32 wb_write[3];
6160
6161         /* Ethernet source and destination addresses */
6162         wb_write[0] = 0x55555555;
6163         wb_write[1] = 0x55555555;
6164         wb_write[2] = 0x20;             /* SOP */
6165         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6166
6167         /* NON-IP protocol */
6168         wb_write[0] = 0x09000000;
6169         wb_write[1] = 0x55555555;
6170         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6171         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6172 }
6173
6174 /* Some of the internal memories
6175  * are not directly readable from the driver;
6176  * to test them we send debug packets.
6177  */
6178 static int bnx2x_int_mem_test(struct bnx2x *bp)
6179 {
6180         int factor;
6181         int count, i;
6182         u32 val = 0;
6183
6184         if (CHIP_REV_IS_FPGA(bp))
6185                 factor = 120;
6186         else if (CHIP_REV_IS_EMUL(bp))
6187                 factor = 200;
6188         else
6189                 factor = 1;
6190
6191         DP(NETIF_MSG_HW, "start part1\n");
6192
6193         /* Disable inputs of parser neighbor blocks */
6194         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6195         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6196         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6197         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6198
6199         /*  Write 0 to parser credits for CFC search request */
6200         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6201
6202         /* send Ethernet packet */
6203         bnx2x_lb_pckt(bp);
6204
6205         /* TODO: should the NIG statistics be reset here? */
6206         /* Wait until NIG register shows 1 packet of size 0x10 */
6207         count = 1000 * factor;
6208         while (count) {
6209
6210                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6211                 val = *bnx2x_sp(bp, wb_data[0]);
6212                 if (val == 0x10)
6213                         break;
6214
6215                 msleep(10);
6216                 count--;
6217         }
6218         if (val != 0x10) {
6219                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6220                 return -1;
6221         }
6222
6223         /* Wait until PRS register shows 1 packet */
6224         count = 1000 * factor;
6225         while (count) {
6226                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6227                 if (val == 1)
6228                         break;
6229
6230                 msleep(10);
6231                 count--;
6232         }
6233         if (val != 0x1) {
6234                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6235                 return -2;
6236         }
6237
6238         /* Reset and init BRB, PRS */
6239         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6240         msleep(50);
6241         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6242         msleep(50);
6243         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6244         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6245
6246         DP(NETIF_MSG_HW, "part2\n");
6247
6248         /* Disable inputs of parser neighbor blocks */
6249         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6250         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6251         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6252         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6253
6254         /* Write 0 to parser credits for CFC search request */
6255         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6256
6257         /* send 10 Ethernet packets */
6258         for (i = 0; i < 10; i++)
6259                 bnx2x_lb_pckt(bp);
6260
6261         /* Wait until the NIG register shows 10 + 1
6262            packets, i.e. 11 * 0x10 = 0xb0 octets */
6263         count = 1000 * factor;
6264         while (count) {
6265
6266                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6267                 val = *bnx2x_sp(bp, wb_data[0]);
6268                 if (val == 0xb0)
6269                         break;
6270
6271                 msleep(10);
6272                 count--;
6273         }
6274         if (val != 0xb0) {
6275                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6276                 return -3;
6277         }
6278
6279         /* Wait until PRS register shows 2 packets */
6280         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6281         if (val != 2)
6282                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6283
6284         /* Write 1 to parser credits for CFC search request */
6285         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6286
6287         /* Wait until PRS register shows 3 packets */
6288         msleep(10 * factor);
6290         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6291         if (val != 3)
6292                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6293
6294         /* clear NIG EOP FIFO */
6295         for (i = 0; i < 11; i++)
6296                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6297         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6298         if (val != 1) {
6299                 BNX2X_ERR("clear of NIG failed\n");
6300                 return -4;
6301         }
6302
6303         /* Reset and init BRB, PRS, NIG */
6304         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6305         msleep(50);
6306         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6307         msleep(50);
6308         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6309         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6310 #ifndef BCM_CNIC
6311         /* set NIC mode */
6312         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6313 #endif
6314
6315         /* Enable inputs of parser neighbor blocks */
6316         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6317         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6318         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6319         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6320
6321         DP(NETIF_MSG_HW, "done\n");
6322
6323         return 0; /* OK */
6324 }
6325
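/* Unmask the attention interrupts of the HW blocks: writing 0 to an
 * INT_MASK register unmasks all of its bits; the non-zero writes keep
 * the specific bits noted in the inline comments masked.
 */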
6326 static void enable_blocks_attention(struct bnx2x *bp)
6327 {
6328         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6329         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6330         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6331         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6332         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6333         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6334         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6335         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6336         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6337 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6338 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6339         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6340         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6341         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6342 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6343 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6344         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6345         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6346         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6347         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6348 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6349 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6350         if (CHIP_REV_IS_FPGA(bp))
6351                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6352         else
6353                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6354         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6355         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6356         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6357 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6358 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6359         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6360         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6361 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6362         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
6363 }
6364
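/* Per-block parity attention masks: a set bit keeps that parity source
 * masked (see the bit-number comments below), so 0x0 enables all parity
 * attentions of a block while 0xffffffff leaves it fully masked.
 */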
6365 static const struct {
6366         u32 addr;
6367         u32 mask;
6368 } bnx2x_parity_mask[] = {
6369         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6370         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6371         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6372         {HC_REG_HC_PRTY_MASK, 0xffffffff},
6373         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6374         {QM_REG_QM_PRTY_MASK, 0x0},
6375         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6376         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6377         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6378         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6379         {CDU_REG_CDU_PRTY_MASK, 0x0},
6380         {CFC_REG_CFC_PRTY_MASK, 0x0},
6381         {DBG_REG_DBG_PRTY_MASK, 0x0},
6382         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6383         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6384         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6385         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6386         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6387         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6388         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6389         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6390         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6391         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6392         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6393         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6394         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6395         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6396         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6397 };
6398
6399 static void enable_blocks_parity(struct bnx2x *bp)
6400 {
6401         int i, mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6403
6404         for (i = 0; i < mask_arr_len; i++)
6405                 REG_WR(bp, bnx2x_parity_mask[i].addr,
6406                         bnx2x_parity_mask[i].mask);
6407 }
6408
6410 static void bnx2x_reset_common(struct bnx2x *bp)
6411 {
6412         /* reset_common */
6413         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6414                0xd3ffff7f);
6415         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6416 }
6417
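/* Derive the PXP arbiter read/write ordering from the PCIe DEVCTL
 * payload and read-request sizes; bp->mrrs, when not -1, forces the
 * read order instead.
 */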
6418 static void bnx2x_init_pxp(struct bnx2x *bp)
6419 {
6420         u16 devctl;
6421         int r_order, w_order;
6422
6423         pci_read_config_word(bp->pdev,
6424                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6425         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6426         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6427         if (bp->mrrs == -1)
6428                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6429         else {
6430                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6431                 r_order = bp->mrrs;
6432         }
6433
6434         bnx2x_init_pxp_arb(bp, r_order, w_order);
6435 }
6436
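/* Enable fan failure detection if the shared HW config requires it,
 * either unconditionally or because one of the PHYs that needs a fan
 * (SFX7101, BCM8727, BCM8481) is fitted; failures assert SPIO5.
 */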
6437 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6438 {
6439         int is_required;
6440         u32 val;
6441         int port;
6442
6443         if (BP_NOMCP(bp))
6444                 return;
6445
6446         is_required = 0;
6447         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6448               SHARED_HW_CFG_FAN_FAILURE_MASK;
6449
6450         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6451                 is_required = 1;
6452
6453         /*
6454          * The fan failure mechanism is usually related to the PHY type since
6455          * the power consumption of the board is affected by the PHY. Currently,
6456          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6457          */
6458         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6459                 for (port = PORT_0; port < PORT_MAX; port++) {
6460                         u32 phy_type =
6461                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6462                                          external_phy_config) &
6463                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6464                         is_required |=
6465                                 ((phy_type ==
6466                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6467                                  (phy_type ==
6468                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6469                                  (phy_type ==
6470                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6471                 }
6472
6473         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6474
6475         if (is_required == 0)
6476                 return;
6477
6478         /* Fan failure is indicated by SPIO 5 */
6479         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6480                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6481
6482         /* set to active low mode */
6483         val = REG_RD(bp, MISC_REG_SPIO_INT);
6484         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6485                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6486         REG_WR(bp, MISC_REG_SPIO_INT, val);
6487
6488         /* enable interrupt to signal the IGU */
6489         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6490         val |= (1 << MISC_REGISTERS_SPIO_5);
6491         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6492 }
6493
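/* One-time common (per-chip) init: reset and re-init all HW blocks,
 * set endianness and page sizes, run the E1 internal memory self test
 * on the first load since power-up, and init the PHY via the bootcode.
 */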
6494 static int bnx2x_init_common(struct bnx2x *bp)
6495 {
6496         u32 val, i;
6497 #ifdef BCM_CNIC
6498         u32 wb_write[2];
6499 #endif
6500
6501         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6502
6503         bnx2x_reset_common(bp);
6504         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6505         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6506
6507         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6508         if (CHIP_IS_E1H(bp))
6509                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6510
6511         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6512         msleep(30);
6513         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6514
6515         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6516         if (CHIP_IS_E1(bp)) {
6517                 /* enable HW interrupt from PXP on USDM overflow
6518                    bit 16 on INT_MASK_0 */
6519                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6520         }
6521
6522         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6523         bnx2x_init_pxp(bp);
6524
6525 #ifdef __BIG_ENDIAN
6526         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6527         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6528         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6529         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6530         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6531         /* make sure this value is 0 */
6532         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6533
6534 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6535         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6536         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6537         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6538         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6539 #endif
6540
6541         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6542 #ifdef BCM_CNIC
6543         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6544         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6545         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6546 #endif
6547
6548         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6549                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6550
6551         /* let the HW do its magic ... */
6552         msleep(100);
6553         /* finish PXP init */
6554         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6555         if (val != 1) {
6556                 BNX2X_ERR("PXP2 CFG failed\n");
6557                 return -EBUSY;
6558         }
6559         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6560         if (val != 1) {
6561                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6562                 return -EBUSY;
6563         }
6564
6565         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6566         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6567
6568         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6569
6570         /* clean the DMAE memory */
6571         bp->dmae_ready = 1;
6572         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6573
6574         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6575         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6576         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6577         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6578
6579         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6580         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6581         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6582         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6583
6584         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6585
6586 #ifdef BCM_CNIC
6587         wb_write[0] = 0;
6588         wb_write[1] = 0;
6589         for (i = 0; i < 64; i++) {
6590                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6591                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6592
6593                 if (CHIP_IS_E1H(bp)) {
6594                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6595                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6596                                           wb_write, 2);
6597                 }
6598         }
6599 #endif
6600         /* soft reset pulse */
6601         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6602         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6603
6604 #ifdef BCM_CNIC
6605         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6606 #endif
6607
6608         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6609         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6610         if (!CHIP_REV_IS_SLOW(bp)) {
6611                 /* enable hw interrupt from doorbell Q */
6612                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6613         }
6614
6615         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6616         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6617         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6618 #ifndef BCM_CNIC
6619         /* set NIC mode */
6620         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6621 #endif
6622         if (CHIP_IS_E1H(bp))
6623                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6624
6625         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6626         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6627         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6628         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6629
6630         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6631         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6632         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6633         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6634
6635         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6636         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6637         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6638         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6639
6640         /* sync semi rtc */
6641         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6642                0x80000000);
6643         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6644                0x80000000);
6645
6646         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6647         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6648         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6649
6650         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6651         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6652                 REG_WR(bp, i, random32());
6653         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6654 #ifdef BCM_CNIC
6655         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6656         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6657         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6658         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6659         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6660         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6661         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6662         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6663         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6664         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6665 #endif
6666         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6667
6668         if (sizeof(union cdu_context) != 1024)
6669                 /* we currently assume that a context is 1024 bytes */
6670                 dev_alert(&bp->pdev->dev, "please adjust the size "
6671                                           "of cdu_context(%ld)\n",
6672                          (long)sizeof(union cdu_context));
6673
6674         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6675         val = (4 << 24) + (0 << 12) + 1024;
6676         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6677
6678         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6679         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6680         /* enable context validation interrupt from CFC */
6681         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6682
6683         /* set the thresholds to prevent CFC/CDU race */
6684         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6685
6686         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6687         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6688
6689         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6690         /* Reset PCIE errors for debug */
6691         REG_WR(bp, 0x2814, 0xffffffff);
6692         REG_WR(bp, 0x3820, 0xffffffff);
6693
6694         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6695         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6696         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6697         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6698
6699         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6700         if (CHIP_IS_E1H(bp)) {
6701                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6702                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6703         }
6704
6705         if (CHIP_REV_IS_SLOW(bp))
6706                 msleep(200);
6707
6708         /* finish CFC init */
6709         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6710         if (val != 1) {
6711                 BNX2X_ERR("CFC LL_INIT failed\n");
6712                 return -EBUSY;
6713         }
6714         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6715         if (val != 1) {
6716                 BNX2X_ERR("CFC AC_INIT failed\n");
6717                 return -EBUSY;
6718         }
6719         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6720         if (val != 1) {
6721                 BNX2X_ERR("CFC CAM_INIT failed\n");
6722                 return -EBUSY;
6723         }
6724         REG_WR(bp, CFC_REG_DEBUG0, 0);
6725
6726         /* read NIG statistic
6727            to see if this is our first up since powerup */
6728         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6729         val = *bnx2x_sp(bp, wb_data[0]);
6730
6731         /* do internal memory self test */
6732         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6733                 BNX2X_ERR("internal mem self test failed\n");
6734                 return -EBUSY;
6735         }
6736
6737         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6738         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6739         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6740         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6741         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6742                 bp->port.need_hw_lock = 1;
6743                 break;
6744
6745         default:
6746                 break;
6747         }
6748
6749         bnx2x_setup_fan_failure_detection(bp);
6750
6751         /* clear PXP2 attentions */
6752         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6753
6754         enable_blocks_attention(bp);
6755         if (CHIP_PARITY_SUPPORTED(bp))
6756                 enable_blocks_parity(bp);
6757
6758         if (!BP_NOMCP(bp)) {
6759                 bnx2x_acquire_phy_lock(bp);
6760                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6761                 bnx2x_release_phy_lock(bp);
6762         } else
6763                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6764
6765         return 0;
6766 }
6767
6768 static int bnx2x_init_port(struct bnx2x *bp)
6769 {
6770         int port = BP_PORT(bp);
6771         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6772         u32 low, high;
6773         u32 val;
6774
6775         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
6776
6777         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6778
6779         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6780         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6781
6782         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6783         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6784         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6785         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6786
6787 #ifdef BCM_CNIC
6788         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6789
6790         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6791         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6792         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6793 #endif
6794
6795         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6796
6797         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6798         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6799                 /* no pause for emulation and FPGA */
6800                 low = 0;
6801                 high = 513;
6802         } else {
6803                 if (IS_E1HMF(bp))
6804                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6805                 else if (bp->dev->mtu > 4096) {
6806                         if (bp->flags & ONE_PORT_FLAG)
6807                                 low = 160;
6808                         else {
6809                                 val = bp->dev->mtu;
6810                                 /* (24*1024 + val*4)/256 */
6811                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6812                         }
6813                 } else
6814                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6815                 high = low + 56;        /* 14*1024/256 */
6816         }
6817         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6818         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
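        /* worked example of the threshold math above: in single-function
           mode on a two-port device with mtu 9000,
           low = 96 + 9000/64 + 1 = 237 (the rounded-up
           (24*1024 + 9000*4)/256) and high = 237 + 56 = 293,
           i.e. 14KB of BRB above the low watermark */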
6819
6820
6821         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6822
6823         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6824         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6825         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6826         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6827
6828         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6829         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6830         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6831         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6832
6833         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6834         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6835
6836         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6837
6838         /* configure PBF to work without PAUSE mtu 9000 */
6839         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6840
6841         /* update threshold */
6842         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6843         /* update init credit */
6844         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6845
6846         /* probe changes */
6847         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6848         msleep(5);
6849         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6850
6851 #ifdef BCM_CNIC
6852         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6853 #endif
6854         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6855         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6856
6857         if (CHIP_IS_E1(bp)) {
6858                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6859                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6860         }
6861         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6862
6863         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6864         /* init aeu_mask_attn_func_0/1:
6865          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6866          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6867          *             bits 4-7 are used for "per vn group attention" */
6868         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6869                (IS_E1HMF(bp) ? 0xF7 : 0x7));
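        /* i.e. 0xF7 = 11110111b - only bit 3 masked in MF mode;
           0x7 = 00000111b - bits 3-7 masked in SF mode */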
6870
6871         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6872         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6873         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6874         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6875         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6876
6877         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6878
6879         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6880
6881         if (CHIP_IS_E1H(bp)) {
6882                 /* 0x2 disable e1hov, 0x1 enable */
6883                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6884                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6885
6886                 {
6887                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6888                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6889                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6890                 }
6891         }
6892
6893         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6894         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6895
6896         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6897         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6898                 {
6899                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6900
6901                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6902                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6903
6904                 /* The GPIO should be swapped if the swap register is
6905                    set and active */
6906                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6907                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6908
6909                 /* Select function upon port-swap configuration */
6910                 if (port == 0) {
6911                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6912                         aeu_gpio_mask = (swap_val && swap_override) ?
6913                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6914                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6915                 } else {
6916                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6917                         aeu_gpio_mask = (swap_val && swap_override) ?
6918                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6919                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6920                 }
6921                 val = REG_RD(bp, offset);
6922                 /* add GPIO3 to group */
6923                 val |= aeu_gpio_mask;
6924                 REG_WR(bp, offset, val);
6925                 }
6926                 break;
6927
6928         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6929         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6930                 /* add SPIO 5 to group 0 */
6931                 {
6932                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6933                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6934                 val = REG_RD(bp, reg_addr);
6935                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6936                 REG_WR(bp, reg_addr, val);
6937                 }
6938                 break;
6939
6940         default:
6941                 break;
6942         }
6943
6944         bnx2x__link_reset(bp);
6945
6946         return 0;
6947 }
6948
6949 #define ILT_PER_FUNC            (768/2)
6950 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6951 /* the phys address is shifted right 12 bits and a 1=valid bit is
6952    added at the 53rd bit;
6953    since this is a wide register(TM)
6954    we split it into two 32-bit writes
6955  */
6956 #define ONCHIP_ADDR1(x)         ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
6957 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)(x) >> 44)))
6958 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
6959 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
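/* worked example (derived from the macros above): for a 4KB-aligned DMA
 * address x = 0x12345000 (below 2^44),
 *   ONCHIP_ADDR1(x) = 0x00012345  (bits 12-43 of x)
 *   ONCHIP_ADDR2(x) = 0x00100000  (bits 44-63 of x, plus the valid bit)
 * so the valid bit lands at bit 52 of the combined value - the "53rd
 * bit" counting from 1
 */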
6960
6961 #ifdef BCM_CNIC
6962 #define CNIC_ILT_LINES          127
6963 #define CNIC_CTX_PER_ILT        16
6964 #else
6965 #define CNIC_ILT_LINES          0
6966 #endif
6967
6968 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6969 {
6970         int reg;
6971
6972         if (CHIP_IS_E1H(bp))
6973                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6974         else /* E1 */
6975                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6976
6977         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6978 }
6979
6980 static int bnx2x_init_func(struct bnx2x *bp)
6981 {
6982         int port = BP_PORT(bp);
6983         int func = BP_FUNC(bp);
6984         u32 addr, val;
6985         int i;
6986
6987         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
6988
6989         /* set MSI reconfigure capability */
6990         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6991         val = REG_RD(bp, addr);
6992         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6993         REG_WR(bp, addr, val);
6994
6995         i = FUNC_ILT_BASE(func);
6996
6997         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6998         if (CHIP_IS_E1H(bp)) {
6999                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
7000                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
7001         } else /* E1 */
7002                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
7003                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
7004
7005 #ifdef BCM_CNIC
7006         i += 1 + CNIC_ILT_LINES;
7007         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
7008         if (CHIP_IS_E1(bp))
7009                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
7010         else {
7011                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
7012                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
7013         }
7014
7015         i++;
7016         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7017         if (CHIP_IS_E1(bp))
7018                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7019         else {
7020                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7021                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7022         }
7023
7024         i++;
7025         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7026         if (CHIP_IS_E1(bp))
7027                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7028         else {
7029                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7030                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7031         }
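        /* at this point the per-function ILT layout is, per the writes
         * above (with i = FUNC_ILT_BASE(func)):
         *   lines i .. i+CNIC_ILT_LINES   CDU contexts
         *   line  i+CNIC_ILT_LINES+1      timers
         *   line  i+CNIC_ILT_LINES+2      QM queues
         *   line  i+CNIC_ILT_LINES+3      searcher T1
         */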
7032
7033         /* tell the searcher where the T2 table is */
7034         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7035
7036         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7037                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7038
7039         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7040                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7041                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7042
7043         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7044 #endif
7045
7046         if (CHIP_IS_E1H(bp)) {
7047                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7048                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7049                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7050                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7051                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7052                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7053                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7054                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7055                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7056
7057                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7058                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7059         }
7060
7061         /* HC init per function */
7062         if (CHIP_IS_E1H(bp)) {
7063                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7064
7065                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7066                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7067         }
7068         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7069
7070         /* Reset PCIE errors for debug */
7071         REG_WR(bp, 0x2114, 0xffffffff);
7072         REG_WR(bp, 0x2120, 0xffffffff);
7073
7074         return 0;
7075 }
7076
7077 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7078 {
7079         int i, rc = 0;
7080
7081         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
7082            BP_FUNC(bp), load_code);
7083
7084         bp->dmae_ready = 0;
7085         mutex_init(&bp->dmae_mutex);
7086         rc = bnx2x_gunzip_init(bp);
7087         if (rc)
7088                 return rc;
7089
7090         switch (load_code) {
7091         case FW_MSG_CODE_DRV_LOAD_COMMON:
7092                 rc = bnx2x_init_common(bp);
7093                 if (rc)
7094                         goto init_hw_err;
7095                 /* no break */
7096
7097         case FW_MSG_CODE_DRV_LOAD_PORT:
7098                 bp->dmae_ready = 1;
7099                 rc = bnx2x_init_port(bp);
7100                 if (rc)
7101                         goto init_hw_err;
7102                 /* no break */
7103
7104         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7105                 bp->dmae_ready = 1;
7106                 rc = bnx2x_init_func(bp);
7107                 if (rc)
7108                         goto init_hw_err;
7109                 break;
7110
7111         default:
7112                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7113                 break;
7114         }
7115
7116         if (!BP_NOMCP(bp)) {
7117                 int func = BP_FUNC(bp);
7118
7119                 bp->fw_drv_pulse_wr_seq =
7120                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7121                                  DRV_PULSE_SEQ_MASK);
7122                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7123         }
7124
7125         /* this needs to be done before gunzip end */
7126         bnx2x_zero_def_sb(bp);
7127         for_each_queue(bp, i)
7128                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7129 #ifdef BCM_CNIC
7130         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7131 #endif
7132
7133 init_hw_err:
7134         bnx2x_gunzip_end(bp);
7135
7136         return rc;
7137 }
7138
7139 static void bnx2x_free_mem(struct bnx2x *bp)
7140 {
7141
7142 #define BNX2X_PCI_FREE(x, y, size) \
7143         do { \
7144                 if (x) { \
7145                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
7146                         x = NULL; \
7147                         y = 0; \
7148                 } \
7149         } while (0)
7150
7151 #define BNX2X_FREE(x) \
7152         do { \
7153                 if (x) { \
7154                         vfree(x); \
7155                         x = NULL; \
7156                 } \
7157         } while (0)
7158
7159         int i;
7160
7161         /* fastpath */
7162         /* Common */
7163         for_each_queue(bp, i) {
7164
7165                 /* status blocks */
7166                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7167                                bnx2x_fp(bp, i, status_blk_mapping),
7168                                sizeof(struct host_status_block));
7169         }
7170         /* Rx */
7171         for_each_queue(bp, i) {
7172
7173                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7174                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7175                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7176                                bnx2x_fp(bp, i, rx_desc_mapping),
7177                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
7178
7179                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7180                                bnx2x_fp(bp, i, rx_comp_mapping),
7181                                sizeof(struct eth_fast_path_rx_cqe) *
7182                                NUM_RCQ_BD);
7183
7184                 /* SGE ring */
7185                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7186                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7187                                bnx2x_fp(bp, i, rx_sge_mapping),
7188                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7189         }
7190         /* Tx */
7191         for_each_queue(bp, i) {
7192
7193                 /* fastpath tx rings: tx_buf tx_desc */
7194                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7195                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7196                                bnx2x_fp(bp, i, tx_desc_mapping),
7197                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7198         }
7199         /* end of fastpath */
7200
7201         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7202                        sizeof(struct host_def_status_block));
7203
7204         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7205                        sizeof(struct bnx2x_slowpath));
7206
7207 #ifdef BCM_CNIC
7208         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7209         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7210         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7211         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7212         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7213                        sizeof(struct host_status_block));
7214 #endif
7215         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7216
7217 #undef BNX2X_PCI_FREE
7218 #undef BNX2X_FREE
7219 }
7220
7221 static int bnx2x_alloc_mem(struct bnx2x *bp)
7222 {
7223
7224 #define BNX2X_PCI_ALLOC(x, y, size) \
7225         do { \
7226                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7227                 if (x == NULL) \
7228                         goto alloc_mem_err; \
7229                 memset(x, 0, size); \
7230         } while (0)
7231
7232 #define BNX2X_ALLOC(x, size) \
7233         do { \
7234                 x = vmalloc(size); \
7235                 if (x == NULL) \
7236                         goto alloc_mem_err; \
7237                 memset(x, 0, size); \
7238         } while (0)
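        /* note: on alloc_mem_err we call bnx2x_free_mem(), which is safe
           on a partially allocated bp since its free macros skip NULL
           pointers and NULL them out after freeing */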
7239
7240         int i;
7241
7242         /* fastpath */
7243         /* Common */
7244         for_each_queue(bp, i) {
7245                 bnx2x_fp(bp, i, bp) = bp;
7246
7247                 /* status blocks */
7248                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7249                                 &bnx2x_fp(bp, i, status_blk_mapping),
7250                                 sizeof(struct host_status_block));
7251         }
7252         /* Rx */
7253         for_each_queue(bp, i) {
7254
7255                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7256                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7257                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7258                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7259                                 &bnx2x_fp(bp, i, rx_desc_mapping),
7260                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7261
7262                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7263                                 &bnx2x_fp(bp, i, rx_comp_mapping),
7264                                 sizeof(struct eth_fast_path_rx_cqe) *
7265                                 NUM_RCQ_BD);
7266
7267                 /* SGE ring */
7268                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7269                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7270                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7271                                 &bnx2x_fp(bp, i, rx_sge_mapping),
7272                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7273         }
7274         /* Tx */
7275         for_each_queue(bp, i) {
7276
7277                 /* fastpath tx rings: tx_buf tx_desc */
7278                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7279                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7280                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7281                                 &bnx2x_fp(bp, i, tx_desc_mapping),
7282                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7283         }
7284         /* end of fastpath */
7285
7286         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7287                         sizeof(struct host_def_status_block));
7288
7289         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7290                         sizeof(struct bnx2x_slowpath));
7291
7292 #ifdef BCM_CNIC
7293         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7294
7295         /* allocate the searcher T2 table;
7296            we allocate 1/4 of the T1 size for T2
7297            (which is not entered into the ILT) */
7298         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7299
7300         /* Initialize T2 (for 1024 connections) */
7301         for (i = 0; i < 16*1024; i += 64)
7302                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
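        /* i.e. the last 8 bytes of each 64-byte T2 entry hold the
           physical address of the next entry, forming the searcher's
           free list; SRC_REG_LASTFREE0, programmed in bnx2x_init_func(),
           points at the final entry (t2_mapping + 16*1024 - 64) */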
7303
7304         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7305         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7306
7307         /* QM queues (128*MAX_CONN) */
7308         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7309
7310         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7311                         sizeof(struct host_status_block));
7312 #endif
7313
7314         /* Slow path ring */
7315         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7316
7317         return 0;
7318
7319 alloc_mem_err:
7320         bnx2x_free_mem(bp);
7321         return -ENOMEM;
7322
7323 #undef BNX2X_PCI_ALLOC
7324 #undef BNX2X_ALLOC
7325 }
7326
7327 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7328 {
7329         int i;
7330
7331         for_each_queue(bp, i) {
7332                 struct bnx2x_fastpath *fp = &bp->fp[i];
7333
7334                 u16 bd_cons = fp->tx_bd_cons;
7335                 u16 sw_prod = fp->tx_pkt_prod;
7336                 u16 sw_cons = fp->tx_pkt_cons;
7337
7338                 while (sw_cons != sw_prod) {
7339                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7340                         sw_cons++;
7341                 }
7342         }
7343 }
7344
7345 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7346 {
7347         int i, j;
7348
7349         for_each_queue(bp, j) {
7350                 struct bnx2x_fastpath *fp = &bp->fp[j];
7351
7352                 for (i = 0; i < NUM_RX_BD; i++) {
7353                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7354                         struct sk_buff *skb = rx_buf->skb;
7355
7356                         if (skb == NULL)
7357                                 continue;
7358
7359                         dma_unmap_single(&bp->pdev->dev,
7360                                          dma_unmap_addr(rx_buf, mapping),
7361                                          bp->rx_buf_size, DMA_FROM_DEVICE);
7362
7363                         rx_buf->skb = NULL;
7364                         dev_kfree_skb(skb);
7365                 }
7366                 if (!fp->disable_tpa)
7367                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7368                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
7369                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
7370         }
7371 }
7372
7373 static void bnx2x_free_skbs(struct bnx2x *bp)
7374 {
7375         bnx2x_free_tx_skbs(bp);
7376         bnx2x_free_rx_skbs(bp);
7377 }
7378
7379 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7380 {
7381         int i, offset = 1;
7382
7383         free_irq(bp->msix_table[0].vector, bp->dev);
7384         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7385            bp->msix_table[0].vector);
7386
7387 #ifdef BCM_CNIC
7388         offset++;
7389 #endif
7390         for_each_queue(bp, i) {
7391                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
7392                    "state %x\n", i, bp->msix_table[i + offset].vector,
7393                    bnx2x_fp(bp, i, state));
7394
7395                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7396         }
7397 }
7398
7399 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7400 {
7401         if (bp->flags & USING_MSIX_FLAG) {
7402                 if (!disable_only)
7403                         bnx2x_free_msix_irqs(bp);
7404                 pci_disable_msix(bp->pdev);
7405                 bp->flags &= ~USING_MSIX_FLAG;
7406
7407         } else if (bp->flags & USING_MSI_FLAG) {
7408                 if (!disable_only)
7409                         free_irq(bp->pdev->irq, bp->dev);
7410                 pci_disable_msi(bp->pdev);
7411                 bp->flags &= ~USING_MSI_FLAG;
7412
7413         } else if (!disable_only)
7414                 free_irq(bp->pdev->irq, bp->dev);
7415 }
7416
7417 static int bnx2x_enable_msix(struct bnx2x *bp)
7418 {
7419         int i, rc, offset = 1;
7420         int igu_vec = 0;
7421
7422         bp->msix_table[0].entry = igu_vec;
7423         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7424
7425 #ifdef BCM_CNIC
7426         igu_vec = BP_L_ID(bp) + offset;
7427         bp->msix_table[1].entry = igu_vec;
7428         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7429         offset++;
7430 #endif
7431         for_each_queue(bp, i) {
7432                 igu_vec = BP_L_ID(bp) + offset + i;
7433                 bp->msix_table[i + offset].entry = igu_vec;
7434                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7435                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7436         }
7437
7438         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7439                              BNX2X_NUM_QUEUES(bp) + offset);
7440
7441         /*
7442          * reconfigure number of tx/rx queues according to available
7443          * MSI-X vectors
7444          */
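        /*
         * pci_enable_msix() returns 0 on success, a negative errno on
         * failure, or a positive count of the vectors actually available;
         * in the last case we retry below with that count
         */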
7445         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7446                 /* vectors available for FP */
7447                 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7448
7449                 DP(NETIF_MSG_IFUP,
7450                    "Trying to use fewer MSI-X vectors: %d\n", rc);
7451
7452                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7453
7454                 if (rc) {
7455                         DP(NETIF_MSG_IFUP,
7456                            "MSI-X is not attainable  rc %d\n", rc);
7457                         return rc;
7458                 }
7459
7460                 bp->num_queues = min(bp->num_queues, fp_vec);
7461
7462                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7463                                   bp->num_queues);
7464         } else if (rc) {
7465                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7466                 return rc;
7467         }
7468
7469         bp->flags |= USING_MSIX_FLAG;
7470
7471         return 0;
7472 }
7473
7474 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7475 {
7476         int i, rc, offset = 1;
7477
7478         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7479                          bp->dev->name, bp->dev);
7480         if (rc) {
7481                 BNX2X_ERR("request sp irq failed\n");
7482                 return -EBUSY;
7483         }
7484
7485 #ifdef BCM_CNIC
7486         offset++;
7487 #endif
7488         for_each_queue(bp, i) {
7489                 struct bnx2x_fastpath *fp = &bp->fp[i];
7490                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7491                          bp->dev->name, i);
7492
7493                 rc = request_irq(bp->msix_table[i + offset].vector,
7494                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7495                 if (rc) {
7496                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7497                         bnx2x_free_msix_irqs(bp);
7498                         return -EBUSY;
7499                 }
7500
7501                 fp->state = BNX2X_FP_STATE_IRQ;
7502         }
7503
7504         i = BNX2X_NUM_QUEUES(bp);
7505         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
7506                " ... fp[%d] %d\n",
7507                bp->msix_table[0].vector,
7508                0, bp->msix_table[offset].vector,
7509                i - 1, bp->msix_table[offset + i - 1].vector);
7510
7511         return 0;
7512 }
7513
7514 static int bnx2x_enable_msi(struct bnx2x *bp)
7515 {
7516         int rc;
7517
7518         rc = pci_enable_msi(bp->pdev);
7519         if (rc) {
7520                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7521                 return -1;
7522         }
7523         bp->flags |= USING_MSI_FLAG;
7524
7525         return 0;
7526 }
7527
7528 static int bnx2x_req_irq(struct bnx2x *bp)
7529 {
7530         unsigned long flags;
7531         int rc;
7532
7533         if (bp->flags & USING_MSI_FLAG)
7534                 flags = 0;
7535         else
7536                 flags = IRQF_SHARED;
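        /* a legacy INTx line may be shared with other devices, hence
           IRQF_SHARED; an MSI vector is exclusively ours */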
7537
7538         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7539                          bp->dev->name, bp->dev);
7540         if (!rc)
7541                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7542
7543         return rc;
7544 }
7545
7546 static void bnx2x_napi_enable(struct bnx2x *bp)
7547 {
7548         int i;
7549
7550         for_each_queue(bp, i)
7551                 napi_enable(&bnx2x_fp(bp, i, napi));
7552 }
7553
7554 static void bnx2x_napi_disable(struct bnx2x *bp)
7555 {
7556         int i;
7557
7558         for_each_queue(bp, i)
7559                 napi_disable(&bnx2x_fp(bp, i, napi));
7560 }
7561
7562 static void bnx2x_netif_start(struct bnx2x *bp)
7563 {
7564         int intr_sem;
7565
7566         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7567         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7568
7569         if (intr_sem) {
7570                 if (netif_running(bp->dev)) {
7571                         bnx2x_napi_enable(bp);
7572                         bnx2x_int_enable(bp);
7573                         if (bp->state == BNX2X_STATE_OPEN)
7574                                 netif_tx_wake_all_queues(bp->dev);
7575                 }
7576         }
7577 }
7578
7579 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7580 {
7581         bnx2x_int_disable_sync(bp, disable_hw);
7582         bnx2x_napi_disable(bp);
7583         netif_tx_disable(bp->dev);
7584 }
7585
7586 /*
7587  * Init service functions
7588  */
7589
7590 /**
7591  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7592  *
7593  * @param bp driver descriptor
7594  * @param set set or clear an entry (1 or 0)
7595  * @param mac pointer to a buffer containing a MAC
7596  * @param cl_bit_vec bit vector of clients to register a MAC for
7597  * @param cam_offset offset in a CAM to use
7598  * @param with_bcast set broadcast MAC as well
7599  */
7600 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7601                                       u32 cl_bit_vec, u8 cam_offset,
7602                                       u8 with_bcast)
7603 {
7604         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7605         int port = BP_PORT(bp);
7606
7607         /* CAM allocation
7608          * unicasts 0-31:port0 32-63:port1
7609          * multicast 64-127:port0 128-191:port1
7610          */
7611         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7612         config->hdr.offset = cam_offset;
7613         config->hdr.client_id = 0xff;
7614         config->hdr.reserved1 = 0;
7615
7616         /* primary MAC */
7617         config->config_table[0].cam_entry.msb_mac_addr =
7618                                         swab16(*(u16 *)&mac[0]);
7619         config->config_table[0].cam_entry.middle_mac_addr =
7620                                         swab16(*(u16 *)&mac[2]);
7621         config->config_table[0].cam_entry.lsb_mac_addr =
7622                                         swab16(*(u16 *)&mac[4]);
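        /* e.g. on a little-endian host a MAC of 00:1a:2b:3c:4d:5e yields
           msb 0x001a, middle 0x2b3c, lsb 0x4d5e - swab16() restores the
           on-wire byte order within each 16-bit CAM field */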
7623         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7624         if (set)
7625                 config->config_table[0].target_table_entry.flags = 0;
7626         else
7627                 CAM_INVALIDATE(config->config_table[0]);
7628         config->config_table[0].target_table_entry.clients_bit_vector =
7629                                                 cpu_to_le32(cl_bit_vec);
7630         config->config_table[0].target_table_entry.vlan_id = 0;
7631
7632         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7633            (set ? "setting" : "clearing"),
7634            config->config_table[0].cam_entry.msb_mac_addr,
7635            config->config_table[0].cam_entry.middle_mac_addr,
7636            config->config_table[0].cam_entry.lsb_mac_addr);
7637
7638         /* broadcast */
7639         if (with_bcast) {
7640                 config->config_table[1].cam_entry.msb_mac_addr =
7641                         cpu_to_le16(0xffff);
7642                 config->config_table[1].cam_entry.middle_mac_addr =
7643                         cpu_to_le16(0xffff);
7644                 config->config_table[1].cam_entry.lsb_mac_addr =
7645                         cpu_to_le16(0xffff);
7646                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7647                 if (set)
7648                         config->config_table[1].target_table_entry.flags =
7649                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7650                 else
7651                         CAM_INVALIDATE(config->config_table[1]);
7652                 config->config_table[1].target_table_entry.clients_bit_vector =
7653                                                         cpu_to_le32(cl_bit_vec);
7654                 config->config_table[1].target_table_entry.vlan_id = 0;
7655         }
7656
7657         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7658                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7659                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7660 }
7661
7662 /**
7663  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7664  *
7665  * @param bp driver descriptor
7666  * @param set set or clear an entry (1 or 0)
7667  * @param mac pointer to a buffer containing a MAC
7668  * @param cl_bit_vec bit vector of clients to register a MAC for
7669  * @param cam_offset offset in a CAM to use
7670  */
7671 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7672                                        u32 cl_bit_vec, u8 cam_offset)
7673 {
7674         struct mac_configuration_cmd_e1h *config =
7675                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7676
7677         config->hdr.length = 1;
7678         config->hdr.offset = cam_offset;
7679         config->hdr.client_id = 0xff;
7680         config->hdr.reserved1 = 0;
7681
7682         /* primary MAC */
7683         config->config_table[0].msb_mac_addr =
7684                                         swab16(*(u16 *)&mac[0]);
7685         config->config_table[0].middle_mac_addr =
7686                                         swab16(*(u16 *)&mac[2]);
7687         config->config_table[0].lsb_mac_addr =
7688                                         swab16(*(u16 *)&mac[4]);
7689         config->config_table[0].clients_bit_vector =
7690                                         cpu_to_le32(cl_bit_vec);
7691         config->config_table[0].vlan_id = 0;
7692         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7693         if (set)
7694                 config->config_table[0].flags = BP_PORT(bp);
7695         else
7696                 config->config_table[0].flags =
7697                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7698
7699         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7700            (set ? "setting" : "clearing"),
7701            config->config_table[0].msb_mac_addr,
7702            config->config_table[0].middle_mac_addr,
7703            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7704
7705         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7706                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7707                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7708 }
7709
7710 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7711                              int *state_p, int poll)
7712 {
7713         /* can take a while if any port is running */
7714         int cnt = 5000;
7715
7716         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7717            poll ? "polling" : "waiting", state, idx);
7718
7719         might_sleep();
7720         while (cnt--) {
7721                 if (poll) {
7722                         bnx2x_rx_int(bp->fp, 10);
7723                         /* if index is different from 0
7724                          * the reply for some commands will
7725                          * be on the non default queue
7726                          */
7727                         if (idx)
7728                                 bnx2x_rx_int(&bp->fp[idx], 10);
7729                 }
7730
7731                 mb(); /* state is changed by bnx2x_sp_event() */
7732                 if (*state_p == state) {
7733 #ifdef BNX2X_STOP_ON_ERROR
7734                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7735 #endif
7736                         return 0;
7737                 }
7738
7739                 msleep(1);
7740
7741                 if (bp->panic)
7742                         return -EIO;
7743         }
7744
7745         /* timeout! */
7746         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7747                   poll ? "polling" : "waiting", state, idx);
7748 #ifdef BNX2X_STOP_ON_ERROR
7749         bnx2x_panic();
7750 #endif
7751
7752         return -EBUSY;
7753 }
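/* note: poll=1 lets the ramrod completion be reaped even when the
 * interrupt path is unavailable (e.g. the MAC-clear calls below during
 * unload pass poll=1); in that case the RX ring is serviced directly
 * through bnx2x_rx_int() rather than waiting on the ISR
 */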
7754
7755 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7756 {
7757         bp->set_mac_pending++;
7758         smp_wmb();
7759
7760         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7761                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7762
7763         /* Wait for a completion */
7764         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7765 }
7766
7767 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7768 {
7769         bp->set_mac_pending++;
7770         smp_wmb();
7771
7772         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7773                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7774                                   1);
7775
7776         /* Wait for a completion */
7777         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7778 }
7779
7780 #ifdef BCM_CNIC
7781 /**
7782  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7783  * MAC(s). This function will wait until the ramrod completion
7784  * returns.
7785  *
7786  * @param bp driver handle
7787  * @param set set or clear the CAM entry
7788  *
7789  * @return 0 if success, -ENODEV if ramrod doesn't return.
7790  */
7791 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7792 {
7793         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7794
7795         bp->set_mac_pending++;
7796         smp_wmb();
7797
7798         /* Send a SET_MAC ramrod */
7799         if (CHIP_IS_E1(bp))
7800                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7801                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7802                                   1);
7803         else
7804                 /* CAM allocation for E1H
7805                  * unicasts: by func number
7806                  * multicast: 20+FUNC*20, 20 each
7807                  */
7808                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7809                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7810
7811         /* Wait for a completion when setting */
7812         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7813
7814         return 0;
7815 }
7816 #endif
7817
7818 static int bnx2x_setup_leading(struct bnx2x *bp)
7819 {
7820         int rc;
7821
7822         /* reset IGU state */
7823         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7824
7825         /* SETUP ramrod */
7826         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7827
7828         /* Wait for completion */
7829         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7830
7831         return rc;
7832 }
7833
7834 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7835 {
7836         struct bnx2x_fastpath *fp = &bp->fp[index];
7837
7838         /* reset IGU state */
7839         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7840
7841         /* SETUP ramrod */
7842         fp->state = BNX2X_FP_STATE_OPENING;
7843         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7844                       fp->cl_id, 0);
7845
7846         /* Wait for completion */
7847         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7848                                  &(fp->state), 0);
7849 }
7850
7851 static int bnx2x_poll(struct napi_struct *napi, int budget);
7852
7853 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7854 {
7855
7856         switch (bp->multi_mode) {
7857         case ETH_RSS_MODE_DISABLED:
7858                 bp->num_queues = 1;
7859                 break;
7860
7861         case ETH_RSS_MODE_REGULAR:
7862                 if (num_queues)
7863                         bp->num_queues = min_t(u32, num_queues,
7864                                                   BNX2X_MAX_QUEUES(bp));
7865                 else
7866                         bp->num_queues = min_t(u32, num_online_cpus(),
7867                                                   BNX2X_MAX_QUEUES(bp));
7868                 break;
7869
7870
7871         default:
7872                 bp->num_queues = 1;
7873                 break;
7874         }
7875 }
7876
7877 static int bnx2x_set_num_queues(struct bnx2x *bp)
7878 {
7879         int rc = 0;
7880
7881         switch (int_mode) {
7882         case INT_MODE_INTx:
7883         case INT_MODE_MSI:
7884                 bp->num_queues = 1;
7885                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7886                 break;
7887         default:
7888                 /* Set number of queues according to bp->multi_mode value */
7889                 bnx2x_set_num_queues_msix(bp);
7890
7891                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7892                    bp->num_queues);
7893
7894                 /* if we can't use MSI-X we only need one fp,
7895                  * so try to enable MSI-X with the requested number of fp's
7896                  * and fall back to MSI or legacy INTx with one fp
7897                  */
7898                 rc = bnx2x_enable_msix(bp);
7899                 if (rc)
7900                         /* failed to enable MSI-X */
7901                         bp->num_queues = 1;
7902                 break;
7903         }
7904         bp->dev->real_num_tx_queues = bp->num_queues;
7905         return rc;
7906 }
7907
7908 #ifdef BCM_CNIC
7909 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7910 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7911 #endif
7912
7913 /* must be called with rtnl_lock */
7914 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7915 {
7916         u32 load_code;
7917         int i, rc;
7918
7919 #ifdef BNX2X_STOP_ON_ERROR
7920         if (unlikely(bp->panic))
7921                 return -EPERM;
7922 #endif
7923
7924         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7925
7926         rc = bnx2x_set_num_queues(bp);
7927
7928         if (bnx2x_alloc_mem(bp)) {
7929                 bnx2x_free_irq(bp, true);
7930                 return -ENOMEM;
7931         }
7932
7933         for_each_queue(bp, i)
7934                 bnx2x_fp(bp, i, disable_tpa) =
7935                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7936
7937         for_each_queue(bp, i)
7938                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7939                                bnx2x_poll, 128);
7940
7941         bnx2x_napi_enable(bp);
7942
7943         if (bp->flags & USING_MSIX_FLAG) {
7944                 rc = bnx2x_req_msix_irqs(bp);
7945                 if (rc) {
7946                         bnx2x_free_irq(bp, true);
7947                         goto load_error1;
7948                 }
7949         } else {
7950                 /* Fall back to INTx if we failed to enable MSI-X due
7951                    to lack of memory (in bnx2x_set_num_queues()) */
7952                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7953                         bnx2x_enable_msi(bp);
7954                 bnx2x_ack_int(bp);
7955                 rc = bnx2x_req_irq(bp);
7956                 if (rc) {
7957                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7958                         bnx2x_free_irq(bp, true);
7959                         goto load_error1;
7960                 }
7961                 if (bp->flags & USING_MSI_FLAG) {
7962                         bp->dev->irq = bp->pdev->irq;
7963                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7964                                     bp->pdev->irq);
7965                 }
7966         }
7967
7968         /* Send LOAD_REQUEST command to MCP.
7969            The reply indicates the type of LOAD command:
7970            if this is the first port to be initialized,
7971            the common blocks should be initialized as well; otherwise not
7972         */
7973         if (!BP_NOMCP(bp)) {
7974                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7975                 if (!load_code) {
7976                         BNX2X_ERR("MCP response failure, aborting\n");
7977                         rc = -EBUSY;
7978                         goto load_error2;
7979                 }
7980                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7981                         rc = -EBUSY; /* other port in diagnostic mode */
7982                         goto load_error2;
7983                 }
7984
7985         } else {
7986                 int port = BP_PORT(bp);
7987
7988                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7989                    load_count[0], load_count[1], load_count[2]);
7990                 load_count[0]++;
7991                 load_count[1 + port]++;
7992                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7993                    load_count[0], load_count[1], load_count[2]);
7994                 if (load_count[0] == 1)
7995                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7996                 else if (load_count[1 + port] == 1)
7997                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7998                 else
7999                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
8000         }
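        /* note: in the NO-MCP path above, load_count[0] counts loads
           chip-wide and load_count[1+port] per port, so the first load on
           the chip does COMMON init, the first on this port PORT init and
           any later one FUNCTION init only - mimicking the reply the MCP
           would have given */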
8001
8002         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
8003             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
8004                 bp->port.pmf = 1;
8005         else
8006                 bp->port.pmf = 0;
8007         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
8008
8009         /* Initialize HW */
8010         rc = bnx2x_init_hw(bp, load_code);
8011         if (rc) {
8012                 BNX2X_ERR("HW init failed, aborting\n");
8013                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8014                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8015                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8016                 goto load_error2;
8017         }
8018
8019         /* Setup NIC internals and enable interrupts */
8020         bnx2x_nic_init(bp, load_code);
8021
8022         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8023             (bp->common.shmem2_base))
8024                 SHMEM2_WR(bp, dcc_support,
8025                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8026                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8027
8028         /* Send LOAD_DONE command to MCP */
8029         if (!BP_NOMCP(bp)) {
8030                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8031                 if (!load_code) {
8032                         BNX2X_ERR("MCP response failure, aborting\n");
8033                         rc = -EBUSY;
8034                         goto load_error3;
8035                 }
8036         }
8037
8038         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8039
8040         rc = bnx2x_setup_leading(bp);
8041         if (rc) {
8042                 BNX2X_ERR("Setup leading failed!\n");
8043 #ifndef BNX2X_STOP_ON_ERROR
8044                 goto load_error3;
8045 #else
8046                 bp->panic = 1;
8047                 return -EBUSY;
8048 #endif
8049         }
8050
8051         if (CHIP_IS_E1H(bp))
8052                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8053                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8054                         bp->flags |= MF_FUNC_DIS;
8055                 }
8056
8057         if (bp->state == BNX2X_STATE_OPEN) {
8058 #ifdef BCM_CNIC
8059                 /* Enable Timer scan */
8060                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8061 #endif
8062                 for_each_nondefault_queue(bp, i) {
8063                         rc = bnx2x_setup_multi(bp, i);
8064                         if (rc)
8065 #ifdef BCM_CNIC
8066                                 goto load_error4;
8067 #else
8068                                 goto load_error3;
8069 #endif
8070                 }
8071
8072                 if (CHIP_IS_E1(bp))
8073                         bnx2x_set_eth_mac_addr_e1(bp, 1);
8074                 else
8075                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
8076 #ifdef BCM_CNIC
8077                 /* Set iSCSI L2 MAC */
8078                 mutex_lock(&bp->cnic_mutex);
8079                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8080                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8081                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8082                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8083                                       CNIC_SB_ID(bp));
8084                 }
8085                 mutex_unlock(&bp->cnic_mutex);
8086 #endif
8087         }
8088
8089         if (bp->port.pmf)
8090                 bnx2x_initial_phy_init(bp, load_mode);
8091
8092         /* Start fast path */
8093         switch (load_mode) {
8094         case LOAD_NORMAL:
8095                 if (bp->state == BNX2X_STATE_OPEN) {
8096                         /* Tx queues should only be re-enabled */
8097                         netif_tx_wake_all_queues(bp->dev);
8098                 }
8099                 /* Initialize the receive filter. */
8100                 bnx2x_set_rx_mode(bp->dev);
8101                 break;
8102
8103         case LOAD_OPEN:
8104                 netif_tx_start_all_queues(bp->dev);
8105                 if (bp->state != BNX2X_STATE_OPEN)
8106                         netif_tx_disable(bp->dev);
8107                 /* Initialize the receive filter. */
8108                 bnx2x_set_rx_mode(bp->dev);
8109                 break;
8110
8111         case LOAD_DIAG:
8112                 /* Initialize the receive filter. */
8113                 bnx2x_set_rx_mode(bp->dev);
8114                 bp->state = BNX2X_STATE_DIAG;
8115                 break;
8116
8117         default:
8118                 break;
8119         }
8120
8121         if (!bp->port.pmf)
8122                 bnx2x__link_status_update(bp);
8123
8124         /* start the timer */
8125         mod_timer(&bp->timer, jiffies + bp->current_interval);
8126
8127 #ifdef BCM_CNIC
8128         bnx2x_setup_cnic_irq_info(bp);
8129         if (bp->state == BNX2X_STATE_OPEN)
8130                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8131 #endif
8132         bnx2x_inc_load_cnt(bp);
8133
8134         return 0;
8135
8136 #ifdef BCM_CNIC
8137 load_error4:
8138         /* Disable Timer scan */
8139         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8140 #endif
8141 load_error3:
8142         bnx2x_int_disable_sync(bp, 1);
8143         if (!BP_NOMCP(bp)) {
8144                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8145                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8146         }
8147         bp->port.pmf = 0;
8148         /* Free SKBs, SGEs, TPA pool and driver internals */
8149         bnx2x_free_skbs(bp);
8150         for_each_queue(bp, i)
8151                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8152 load_error2:
8153         /* Release IRQs */
8154         bnx2x_free_irq(bp, false);
8155 load_error1:
8156         bnx2x_napi_disable(bp);
8157         for_each_queue(bp, i)
8158                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8159         bnx2x_free_mem(bp);
8160
8161         return rc;
8162 }
8163
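/* Stop a non-default (multi) queue: post a HALT ramrod for the client,
 * wait for the fastpath state to reach HALTED, then post a CFC_DEL
 * ramrod and wait for CLOSED. Both completions are polled via
 * bnx2x_wait_ramrod(), so this may sleep.
 */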
8164 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8165 {
8166         struct bnx2x_fastpath *fp = &bp->fp[index];
8167         int rc;
8168
8169         /* halt the connection */
8170         fp->state = BNX2X_FP_STATE_HALTING;
8171         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8172
8173         /* Wait for completion */
8174         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8175                                &(fp->state), 1);
8176         if (rc) /* timeout */
8177                 return rc;
8178
8179         /* delete cfc entry */
8180         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8181
8182         /* Wait for completion */
8183         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8184                                &(fp->state), 1);
8185         return rc;
8186 }
8187
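/* Stop the leading connection (queue 0). Unlike bnx2x_stop_multi(),
 * the final PORT_DELETE ramrod completes on the default status block,
 * so completion is detected by watching the dsb_sp_prod index rather
 * than a fastpath state. The chip is reset afterwards anyway, so a
 * timeout here is logged (-EBUSY) but otherwise non-fatal.
 */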
8188 static int bnx2x_stop_leading(struct bnx2x *bp)
8189 {
8190         __le16 dsb_sp_prod_idx;
8191         /* if the other port is handling traffic,
8192            this can take a lot of time */
8193         int cnt = 500;
8194         int rc;
8195
8196         might_sleep();
8197
8198         /* Send HALT ramrod */
8199         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8200         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8201
8202         /* Wait for completion */
8203         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8204                                &(bp->fp[0].state), 1);
8205         if (rc) /* timeout */
8206                 return rc;
8207
8208         dsb_sp_prod_idx = *bp->dsb_sp_prod;
8209
8210         /* Send PORT_DELETE ramrod */
8211         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8212
8213         /* Wait for the completion to arrive on the default status block.
8214            We are going to reset the chip anyway,
8215            so there is not much to do if this times out
8216          */
8217         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8218                 if (!cnt) {
8219                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8220                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8221                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
8222 #ifdef BNX2X_STOP_ON_ERROR
8223                         bnx2x_panic();
8224 #endif
8225                         rc = -EBUSY;
8226                         break;
8227                 }
8228                 cnt--;
8229                 msleep(1);
8230                 rmb(); /* Refresh the dsb_sp_prod */
8231         }
8232         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8233         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8234
8235         return rc;
8236 }
8237
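/* Per-function reset: mask the HC leading/trailing edge attention
 * registers, stop the CNIC timer scan (waiting up to 2s for it to
 * drain), and clear this function's ILT entries.
 */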
8238 static void bnx2x_reset_func(struct bnx2x *bp)
8239 {
8240         int port = BP_PORT(bp);
8241         int func = BP_FUNC(bp);
8242         int base, i;
8243
8244         /* Configure IGU */
8245         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8246         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8247
8248 #ifdef BCM_CNIC
8249         /* Disable Timer scan */
8250         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8251         /*
8252          * Wait for at least 10ms and up to 2 second for the timers scan to
8253          * complete
8254          */
8255         for (i = 0; i < 200; i++) {
8256                 msleep(10);
8257                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8258                         break;
8259         }
8260 #endif
8261         /* Clear ILT */
8262         base = FUNC_ILT_BASE(func);
8263         for (i = base; i < base + ILT_PER_FUNC; i++)
8264                 bnx2x_ilt_wr(bp, i, 0);
8265 }
8266
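/* Per-port reset: mask NIG interrupts, stop feeding packets to the
 * BRB (except MCP-bound traffic), mask AEU attentions, and warn if
 * the BRB still holds blocks for this port after a 100ms grace period.
 */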
8267 static void bnx2x_reset_port(struct bnx2x *bp)
8268 {
8269         int port = BP_PORT(bp);
8270         u32 val;
8271
8272         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8273
8274         /* Do not rcv packets to BRB */
8275         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8276         /* Do not direct rcv packets that are not for MCP to the BRB */
8277         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8278                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8279
8280         /* Configure AEU */
8281         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8282
8283         msleep(100);
8284         /* Check for BRB port occupancy */
8285         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8286         if (val)
8287                 DP(NETIF_MSG_IFDOWN,
8288                    "BRB1 is not empty, %d blocks are occupied\n", val);
8289
8290         /* TODO: Close Doorbell port? */
8291 }
8292
8293 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8294 {
8295         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
8296            BP_FUNC(bp), reset_code);
8297
8298         switch (reset_code) {
8299         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8300                 bnx2x_reset_port(bp);
8301                 bnx2x_reset_func(bp);
8302                 bnx2x_reset_common(bp);
8303                 break;
8304
8305         case FW_MSG_CODE_DRV_UNLOAD_PORT:
8306                 bnx2x_reset_port(bp);
8307                 bnx2x_reset_func(bp);
8308                 break;
8309
8310         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8311                 bnx2x_reset_func(bp);
8312                 break;
8313
8314         default:
8315                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8316                 break;
8317         }
8318 }
8319
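/* Chip-level cleanup on unload, roughly: drain the Tx fastpath queues,
 * invalidate the MAC/multicast CAM (E1) or MC hash (E1H), optionally
 * program the EMAC match entries for WoL, close all connections, and
 * negotiate the reset scope (common/port/function) with the MCP -
 * or emulate that decision via the load counters when no MCP is present.
 */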
8320 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8321 {
8322         int port = BP_PORT(bp);
8323         u32 reset_code = 0;
8324         int i, cnt, rc;
8325
8326         /* Wait until tx fastpath tasks complete */
8327         for_each_queue(bp, i) {
8328                 struct bnx2x_fastpath *fp = &bp->fp[i];
8329
8330                 cnt = 1000;
8331                 while (bnx2x_has_tx_work_unload(fp)) {
8332
8333                         bnx2x_tx_int(fp);
8334                         if (!cnt) {
8335                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
8336                                           i);
8337 #ifdef BNX2X_STOP_ON_ERROR
8338                                 bnx2x_panic();
8339                                 return;
8340 #else
8341                                 break;
8342 #endif
8343                         }
8344                         cnt--;
8345                         msleep(1);
8346                 }
8347         }
8348         /* Give HW time to discard old tx messages */
8349         msleep(1);
8350
8351         if (CHIP_IS_E1(bp)) {
8352                 struct mac_configuration_cmd *config =
8353                                                 bnx2x_sp(bp, mcast_config);
8354
8355                 bnx2x_set_eth_mac_addr_e1(bp, 0);
8356
8357                 for (i = 0; i < config->hdr.length; i++)
8358                         CAM_INVALIDATE(config->config_table[i]);
8359
8360                 config->hdr.length = i;
8361                 if (CHIP_REV_IS_SLOW(bp))
8362                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8363                 else
8364                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8365                 config->hdr.client_id = bp->fp->cl_id;
8366                 config->hdr.reserved1 = 0;
8367
8368                 bp->set_mac_pending++;
8369                 smp_wmb();
8370
8371                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8372                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8373                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8374
8375         } else { /* E1H */
8376                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8377
8378                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8379
8380                 for (i = 0; i < MC_HASH_SIZE; i++)
8381                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8382
8383                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8384         }
8385 #ifdef BCM_CNIC
8386         /* Clear iSCSI L2 MAC */
8387         mutex_lock(&bp->cnic_mutex);
8388         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8389                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8390                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8391         }
8392         mutex_unlock(&bp->cnic_mutex);
8393 #endif
8394
8395         if (unload_mode == UNLOAD_NORMAL)
8396                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8397
8398         else if (bp->flags & NO_WOL_FLAG)
8399                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8400
8401         else if (bp->wol) {
8402                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8403                 u8 *mac_addr = bp->dev->dev_addr;
8404                 u32 val;
8405                 /* The MAC address is written to entries 1-4 to
8406                    preserve entry 0, which is used by the PMF */
8407                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8408
8409                 val = (mac_addr[0] << 8) | mac_addr[1];
8410                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8411
8412                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8413                       (mac_addr[4] << 8) | mac_addr[5];
8414                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8415
8416                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8417
8418         } else
8419                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8420
8421         /* Close multi and leading connections.
8422            Completions for ramrods are collected in a synchronous way */
8423         for_each_nondefault_queue(bp, i)
8424                 if (bnx2x_stop_multi(bp, i))
8425                         goto unload_error;
8426
8427         rc = bnx2x_stop_leading(bp);
8428         if (rc) {
8429                 BNX2X_ERR("Stop leading failed!\n");
8430 #ifdef BNX2X_STOP_ON_ERROR
8431                 return;
8432 #else
8433                 goto unload_error;
8434 #endif
8435         }
8436
8437 unload_error:
8438         if (!BP_NOMCP(bp))
8439                 reset_code = bnx2x_fw_command(bp, reset_code);
8440         else {
8441                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8442                    load_count[0], load_count[1], load_count[2]);
8443                 load_count[0]--;
8444                 load_count[1 + port]--;
8445                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8446                    load_count[0], load_count[1], load_count[2]);
8447                 if (load_count[0] == 0)
8448                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8449                 else if (load_count[1 + port] == 0)
8450                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8451                 else
8452                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8453         }
8454
8455         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8456             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8457                 bnx2x__link_reset(bp);
8458
8459         /* Reset the chip */
8460         bnx2x_reset_chip(bp, reset_code);
8461
8462         /* Report UNLOAD_DONE to MCP */
8463         if (!BP_NOMCP(bp))
8464                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8465
8466 }
8467
8468 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8469 {
8470         u32 val;
8471
8472         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8473
8474         if (CHIP_IS_E1(bp)) {
8475                 int port = BP_PORT(bp);
8476                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8477                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8478
8479                 val = REG_RD(bp, addr);
8480                 val &= ~(0x300);
8481                 REG_WR(bp, addr, val);
8482         } else if (CHIP_IS_E1H(bp)) {
8483                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8484                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8485                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8486                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8487         }
8488 }
8489
8490 /* must be called with rtnl_lock */
8491 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8492 {
8493         int i;
8494
8495         if (bp->state == BNX2X_STATE_CLOSED) {
8496                 /* Interface has been removed - nothing to recover */
8497                 bp->recovery_state = BNX2X_RECOVERY_DONE;
8498                 bp->is_leader = 0;
8499                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8500                 smp_wmb();
8501
8502                 return -EINVAL;
8503         }
8504
8505 #ifdef BCM_CNIC
8506         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8507 #endif
8508         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8509
8510         /* Set "drop all" */
8511         bp->rx_mode = BNX2X_RX_MODE_NONE;
8512         bnx2x_set_storm_rx_mode(bp);
8513
8514         /* Disable HW interrupts, NAPI and Tx */
8515         bnx2x_netif_stop(bp, 1);
8516         netif_carrier_off(bp->dev);
8517
8518         del_timer_sync(&bp->timer);
8519         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8520                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8521         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8522
8523         /* Release IRQs */
8524         bnx2x_free_irq(bp, false);
8525
8526         /* Cleanup the chip if needed */
8527         if (unload_mode != UNLOAD_RECOVERY)
8528                 bnx2x_chip_cleanup(bp, unload_mode);
8529
8530         bp->port.pmf = 0;
8531
8532         /* Free SKBs, SGEs, TPA pool and driver internals */
8533         bnx2x_free_skbs(bp);
8534         for_each_queue(bp, i)
8535                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8536         for_each_queue(bp, i)
8537                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8538         bnx2x_free_mem(bp);
8539
8540         bp->state = BNX2X_STATE_CLOSED;
8541
8542         /* The last driver must disable "close the gates" if there is no
8543          * parity attention or "process kill" pending.
8544          */
8545         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8546             bnx2x_reset_is_done(bp))
8547                 bnx2x_disable_close_the_gate(bp);
8548
8549         /* Reset the MCP mailbox sequence if a recovery is ongoing */
8550         if (unload_mode == UNLOAD_RECOVERY)
8551                 bp->fw_seq = 0;
8552
8553         return 0;
8554 }
8555
8556 /* Close gates #2, #3 and #4: */
8557 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8558 {
8559         u32 val, addr;
8560
8561         /* Gates #2 and #4a are closed/opened for "not E1" only */
8562         if (!CHIP_IS_E1(bp)) {
8563                 /* #4 */
8564                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8565                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8566                        close ? (val | 0x1) : (val & (~(u32)1)));
8567                 /* #2 */
8568                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8569                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8570                        close ? (val | 0x1) : (val & (~(u32)1)));
8571         }
8572
8573         /* #3 */
8574         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8575         val = REG_RD(bp, addr);
8576         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8577
8578         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8579                 close ? "closing" : "opening");
8580         mmiowb();
8581 }
8582
8583 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8584
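/* Save the CLP `magic' bit before an MCP reset so the MF configuration
 * survives. Typical pairing (a sketch of how the two helpers below are
 * used by the MCP-reset code further down):
 *
 *	u32 magic;
 *	bnx2x_clp_reset_prep(bp, &magic);
 *	... reset the MCP ...
 *	bnx2x_clp_reset_done(bp, magic);
 */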
8585 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8586 {
8587         /* Do some magic... */
8588         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8589         *magic_val = val & SHARED_MF_CLP_MAGIC;
8590         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8591 }
8592
8593 /* Restore the value of the `magic' bit.
8594  *
8595  * @param bp Driver handle.
8596  * @param magic_val Old value of the `magic' bit.
8597  */
8598 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8599 {
8600         /* Restore the `magic' bit value... */
8604         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8605         MF_CFG_WR(bp, shared_mf_config.clp_mb,
8606                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8607 }
8608
8609 /* Prepares for MCP reset: takes care of CLP configurations.
8610  *
8611  * @param bp Driver handle.
8612  * @param magic_val Where the old value of the `magic' bit is saved.
8613  */
8614 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8615 {
8616         u32 shmem;
8617         u32 validity_offset;
8618
8619         DP(NETIF_MSG_HW, "Starting\n");
8620
8621         /* Set `magic' bit in order to save MF config */
8622         if (!CHIP_IS_E1(bp))
8623                 bnx2x_clp_reset_prep(bp, magic_val);
8624
8625         /* Get shmem offset */
8626         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8627         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8628
8629         /* Clear validity map flags */
8630         if (shmem > 0)
8631                 REG_WR(bp, shmem + validity_offset, 0);
8632 }
8633
8634 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
8635 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
8636
8637 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8638  * depending on the HW type.
8639  *
8640  * @param bp Driver handle.
8641  */
8642 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8643 {
8644         /* special handling for emulation and FPGA,
8645            wait 10 times longer */
8646         if (CHIP_REV_IS_SLOW(bp))
8647                 msleep(MCP_ONE_TIMEOUT*10);
8648         else
8649                 msleep(MCP_ONE_TIMEOUT);
8650 }
8651
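/* Wait for the MCP to come back up after a reset: poll the shmem
 * validity map (port 0) until both the DEV_INFO and MB signatures
 * appear or MCP_TIMEOUT expires, then restore the `magic' bit.
 * Returns 0 on success, -ENOTTY if the shmem offset is 0 or the
 * signatures never show up.
 */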
8652 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8653 {
8654         u32 shmem, cnt, validity_offset, val;
8655         int rc = 0;
8656
8657         msleep(100);
8658
8659         /* Get shmem offset */
8660         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8661         if (shmem == 0) {
8662                 BNX2X_ERR("Shmem offset is 0 - MCP is not up\n");
8663                 rc = -ENOTTY;
8664                 goto exit_lbl;
8665         }
8666
8667         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8668
8669         /* Wait for MCP to come up */
8670         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8671                 /* TBD: it's best to check the validity map of the last port;
8672                  * currently this checks port 0.
8673                  */
8674                 val = REG_RD(bp, shmem + validity_offset);
8675                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8676                    shmem + validity_offset, val);
8677
8678                 /* check that shared memory is valid. */
8679                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8680                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8681                         break;
8682
8683                 bnx2x_mcp_wait_one(bp);
8684         }
8685
8686         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8687
8688         /* Check that shared memory is valid. This indicates that MCP is up. */
8689         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8690             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8691                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8692                 rc = -ENOTTY;
8693                 goto exit_lbl;
8694         }
8695
8696 exit_lbl:
8697         /* Restore the `magic' bit value */
8698         if (!CHIP_IS_E1(bp))
8699                 bnx2x_clp_reset_done(bp, magic_val);
8700
8701         return rc;
8702 }
8703
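/* Clear the PXP2 read-start and RBC/CFG "done" indications so that the
 * block is re-initialized after the chip reset (E1 does not need this).
 */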
8704 static void bnx2x_pxp_prep(struct bnx2x *bp)
8705 {
8706         if (!CHIP_IS_E1(bp)) {
8707                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8708                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8709                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8710                 mmiowb();
8711         }
8712 }
8713
8714 /*
8715  * Reset the whole chip except for:
8716  *      - PCIE core
8717  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8718  *              one reset bit)
8719  *      - IGU
8720  *      - MISC (including AEU)
8721  *      - GRC
8722  *      - RBCN, RBCP
8723  */
8724 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8725 {
8726         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8727
8728         not_reset_mask1 =
8729                 MISC_REGISTERS_RESET_REG_1_RST_HC |
8730                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8731                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8732
8733         not_reset_mask2 =
8734                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8735                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8736                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8737                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8738                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8739                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
8740                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8741                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8742
8743         reset_mask1 = 0xffffffff;
8744
8745         if (CHIP_IS_E1(bp))
8746                 reset_mask2 = 0xffff;
8747         else
8748                 reset_mask2 = 0x1ffff;
8749
8750         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8751                reset_mask1 & (~not_reset_mask1));
8752         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8753                reset_mask2 & (~not_reset_mask2));
8754
8755         barrier();
8756         mmiowb();
8757
8758         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8759         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8760         mmiowb();
8761 }
8762
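/* The "process kill" flow, roughly:
 *   1. wait up to 1s for the PXP Tetris buffer to empty;
 *   2. close gates #2, #3 and #4;
 *   3. clear the "unprepared" bit and let the queues drain;
 *   4. prepare the MCP and PXP for reset, then reset the chip;
 *   5. wait for the MCP to come back and reopen the gates.
 * Returns -EAGAIN if the buffer never empties or the MCP stays down.
 */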
8763 static int bnx2x_process_kill(struct bnx2x *bp)
8764 {
8765         int cnt = 1000;
8766         u32 val = 0;
8767         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8768
8769
8770         /* Empty the Tetris buffer, wait for 1s */
8771         do {
8772                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8773                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8774                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8775                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8776                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8777                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8778                     ((port_is_idle_0 & 0x1) == 0x1) &&
8779                     ((port_is_idle_1 & 0x1) == 0x1) &&
8780                     (pgl_exp_rom2 == 0xffffffff))
8781                         break;
8782                 msleep(1);
8783         } while (cnt-- > 0);
8784
8785         if (cnt <= 0) {
8786                 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
8787                           " are still"
8788                           " outstanding read requests after 1s!\n");
8789                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8790                           " port_is_idle_0=0x%08x,"
8791                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8792                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8793                           pgl_exp_rom2);
8794                 return -EAGAIN;
8795         }
8796
8797         barrier();
8798
8799         /* Close gates #2, #3 and #4 */
8800         bnx2x_set_234_gates(bp, true);
8801
8802         /* TBD: Indicate that "process kill" is in progress to MCP */
8803
8804         /* Clear "unprepared" bit */
8805         REG_WR(bp, MISC_REG_UNPREPARED, 0);
8806         barrier();
8807
8808         /* Make sure all is written to the chip before the reset */
8809         mmiowb();
8810
8811         /* Wait for 1ms to empty GLUE and PCI-E core queues,
8812          * PSWHST, GRC and PSWRD Tetris buffer.
8813          */
8814         msleep(1);
8815
8816         /* Prepare to chip reset: */
8817         /* MCP */
8818         bnx2x_reset_mcp_prep(bp, &val);
8819
8820         /* PXP */
8821         bnx2x_pxp_prep(bp);
8822         barrier();
8823
8824         /* reset the chip */
8825         bnx2x_process_kill_chip_reset(bp);
8826         barrier();
8827
8828         /* Recover after reset: */
8829         /* MCP */
8830         if (bnx2x_reset_mcp_comp(bp, val))
8831                 return -EAGAIN;
8832
8833         /* PXP */
8834         bnx2x_pxp_prep(bp);
8835
8836         /* Open the gates #2, #3 and #4 */
8837         bnx2x_set_234_gates(bp, false);
8838
8839         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8840          * reset state and re-enable attentions. */
8841
8842         return 0;
8843 }
8844
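/* Executed by the recovery leader: run the "process kill" flow and, on
 * success, clear the global "reset in progress" flag. Leadership and
 * the LEADER_LOCK HW lock are always released before returning.
 */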
8845 static int bnx2x_leader_reset(struct bnx2x *bp)
8846 {
8847         int rc = 0;
8848         /* Try to recover after the failure */
8849         if (bnx2x_process_kill(bp)) {
8850                 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
8851                        bp->dev->name);
8852                 rc = -EAGAIN;
8853                 goto exit_leader_reset;
8854         }
8855
8856         /* Clear "reset is in progress" bit and update the driver state */
8857         bnx2x_set_reset_done(bp);
8858         bp->recovery_state = BNX2X_RECOVERY_DONE;
8859
8860 exit_leader_reset:
8861         bp->is_leader = 0;
8862         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8863         smp_wmb();
8864         return rc;
8865 }
8866
8867 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8868
8869 /* Assumption: runs under rtnl lock. This together with the fact
8870  * that it's called only from bnx2x_reset_task() ensures that it
8871  * will never be called when netif_running(bp->dev) is false.
8872  */
8873 static void bnx2x_parity_recover(struct bnx2x *bp)
8874 {
8875         DP(NETIF_MSG_HW, "Handling parity\n");
8876         while (1) {
8877                 switch (bp->recovery_state) {
8878                 case BNX2X_RECOVERY_INIT:
8879                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8880                         /* Try to get a LEADER_LOCK HW lock */
8881                         if (bnx2x_trylock_hw_lock(bp,
8882                                 HW_LOCK_RESOURCE_RESERVED_08))
8883                                 bp->is_leader = 1;
8884
8885                         /* Stop the driver */
8886                         /* If interface has been removed - break */
8887                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8888                                 return;
8889
8890                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
8891                         /* Ensure the "is_leader" and "recovery_state"
8892                          * updates are seen by other CPUs
8893                          */
8894                         smp_wmb();
8895                         break;
8896
8897                 case BNX2X_RECOVERY_WAIT:
8898                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8899                         if (bp->is_leader) {
8900                                 u32 load_counter = bnx2x_get_load_cnt(bp);
8901                                 if (load_counter) {
8902                                         /* Wait until all other functions are
8903                                          * down.
8904                                          */
8905                                         schedule_delayed_work(&bp->reset_task,
8906                                                                 HZ/10);
8907                                         return;
8908                                 } else {
8909                                         /* If all other functions have gone down -
8910                                          * try to bring the chip back to
8911                                          * normal. In any case it's an exit
8912                                          * point for a leader.
8913                                          */
8914                                         if (bnx2x_leader_reset(bp) ||
8915                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
8916                                                 printk(KERN_ERR "%s: Recovery "
8917                                                 "has failed. Power cycle is "
8918                                                 "needed.\n", bp->dev->name);
8919                                                 /* Disconnect this device */
8920                                                 netif_device_detach(bp->dev);
8921                                                 /* Block ifup for all functions
8922                                                  * of this ASIC until
8923                                                  * "process kill" or power
8924                                                  * cycle.
8925                                                  */
8926                                                 bnx2x_set_reset_in_progress(bp);
8927                                                 /* Shut down the power */
8928                                                 bnx2x_set_power_state(bp,
8929                                                                 PCI_D3hot);
8930                                                 return;
8931                                         }
8932
8933                                         return;
8934                                 }
8935                         } else { /* non-leader */
8936                                 if (!bnx2x_reset_is_done(bp)) {
8937                                         /* Try to get a LEADER_LOCK HW lock,
8938                                          * since a former leader may have
8939                                          * been unloaded by the user or may
8940                                          * have released leadership for
8941                                          * another reason.
8942                                          */
8943                                         if (bnx2x_trylock_hw_lock(bp,
8944                                             HW_LOCK_RESOURCE_RESERVED_08)) {
8945                                                 /* I'm a leader now! Restart a
8946                                                  * switch case.
8947                                                  */
8948                                                 bp->is_leader = 1;
8949                                                 break;
8950                                         }
8951
8952                                         schedule_delayed_work(&bp->reset_task,
8953                                                                 HZ/10);
8954                                         return;
8955
8956                                 } else { /* A leader has completed
8957                                           * the "process kill". It's an exit
8958                                           * point for a non-leader.
8959                                           */
8960                                         bnx2x_nic_load(bp, LOAD_NORMAL);
8961                                         bp->recovery_state =
8962                                                 BNX2X_RECOVERY_DONE;
8963                                         smp_wmb();
8964                                         return;
8965                                 }
8966                         }
8967                 default:
8968                         return;
8969                 }
8970         }
8971 }
8972
8973 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8974  * scheduled on a general queue in order to prevent a deadlock.
8975  */
8976 static void bnx2x_reset_task(struct work_struct *work)
8977 {
8978         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8979
8980 #ifdef BNX2X_STOP_ON_ERROR
8981         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
8982                   " so the reset is not done to allow a debug dump;\n"
8983                   " you will need to reboot when done\n");
8984         return;
8985 #endif
8986
8987         rtnl_lock();
8988
8989         if (!netif_running(bp->dev))
8990                 goto reset_task_exit;
8991
8992         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8993                 bnx2x_parity_recover(bp);
8994         else {
8995                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8996                 bnx2x_nic_load(bp, LOAD_NORMAL);
8997         }
8998
8999 reset_task_exit:
9000         rtnl_unlock();
9001 }
9002
9003 /* end of nic load/unload */
9004
9005 /* ethtool_ops */
9006
9007 /*
9008  * Init service functions
9009  */
9010
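/* Map a PCI function index to its PGL "pretend" register; writing a
 * function number into this register makes subsequent GRC accesses
 * behave as if they came from that function.
 */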
9011 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
9012 {
9013         switch (func) {
9014         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9015         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9016         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9017         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9018         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9019         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9020         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9021         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9022         default:
9023                 BNX2X_ERR("Unsupported function index: %d\n", func);
9024                 return (u32)(-1);
9025         }
9026 }
9027
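/* Disable HC interrupts after an UNDI unload on E1H: pretend to be
 * function 0, disable interrupts in the "like-E1" mode, then restore
 * the original function. Each pretend write is read back to flush the
 * GRC transaction in the chip.
 */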
9028 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9029 {
9030         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9031
9032         /* Flush all outstanding writes */
9033         mmiowb();
9034
9035         /* Pretend to be function 0 */
9036         REG_WR(bp, reg, 0);
9037         /* Flush the GRC transaction (in the chip) */
9038         new_val = REG_RD(bp, reg);
9039         if (new_val != 0) {
9040                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9041                           new_val);
9042                 BUG();
9043         }
9044
9045         /* From now on we are in the "like-E1" mode */
9046         bnx2x_int_disable(bp);
9047
9048         /* Flush all outstanding writes */
9049         mmiowb();
9050
9051         /* Restore the original function settings */
9052         REG_WR(bp, reg, orig_func);
9053         new_val = REG_RD(bp, reg);
9054         if (new_val != orig_func) {
9055                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9056                           orig_func, new_val);
9057                 BUG();
9058         }
9059 }
9060
9061 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9062 {
9063         if (CHIP_IS_E1H(bp))
9064                 bnx2x_undi_int_disable_e1h(bp, func);
9065         else
9066                 bnx2x_int_disable(bp);
9067 }
9068
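/* Detect and unload a pre-boot UNDI driver. UNDI leaves the chip
 * "unprepared" with the normal doorbell CID offset set to 0x7; if that
 * signature is found, perform the MCP unload handshake for both ports,
 * disable interrupts, close BRB input traffic, reset the device and
 * bring the NIG back out of reset with its port-swap straps preserved.
 */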
9069 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9070 {
9071         u32 val;
9072
9073         /* Check if there is any driver already loaded */
9074         val = REG_RD(bp, MISC_REG_UNPREPARED);
9075         if (val == 0x1) {
9076                 /* Check if it is the UNDI driver; the UNDI driver
9077                  * initializes the CID offset for the normal doorbell to 0x7
9078                  */
9079                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9080                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9081                 if (val == 0x7) {
9082                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9083                         /* save our func */
9084                         int func = BP_FUNC(bp);
9085                         u32 swap_en;
9086                         u32 swap_val;
9087
9088                         /* clear the UNDI indication */
9089                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9090
9091                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
9092
9093                         /* try to unload UNDI on port 0 */
9094                         bp->func = 0;
9095                         bp->fw_seq =
9096                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9097                                 DRV_MSG_SEQ_NUMBER_MASK);
9098                         reset_code = bnx2x_fw_command(bp, reset_code);
9099
9100                         /* if UNDI is loaded on the other port */
9101                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9102
9103                                 /* send "DONE" for previous unload */
9104                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9105
9106                                 /* unload UNDI on port 1 */
9107                                 bp->func = 1;
9108                                 bp->fw_seq =
9109                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9110                                         DRV_MSG_SEQ_NUMBER_MASK);
9111                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9112
9113                                 bnx2x_fw_command(bp, reset_code);
9114                         }
9115
9116                         /* now it's safe to release the lock */
9117                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9118
9119                         bnx2x_undi_int_disable(bp, func);
9120
9121                         /* close input traffic and wait for it */
9122                         /* Do not rcv packets to BRB */
9123                         REG_WR(bp,
9124                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9125                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9126                         /* Do not direct rcv packets that are not for MCP to
9127                          * the BRB */
9128                         REG_WR(bp,
9129                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9130                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9131                         /* clear AEU */
9132                         REG_WR(bp,
9133                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9134                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9135                         msleep(10);
9136
9137                         /* save NIG port swap info */
9138                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9139                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9140                         /* reset device */
9141                         REG_WR(bp,
9142                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9143                                0xd3ffffff);
9144                         REG_WR(bp,
9145                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9146                                0x1403);
9147                         /* take the NIG out of reset and restore swap values */
9148                         REG_WR(bp,
9149                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9150                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
9151                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9152                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9153
9154                         /* send unload done to the MCP */
9155                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9156
9157                         /* restore our func and fw_seq */
9158                         bp->func = func;
9159                         bp->fw_seq =
9160                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9161                                 DRV_MSG_SEQ_NUMBER_MASK);
9162
9163                 } else
9164                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9165         }
9166 }
9167
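/* Read the HW info shared by both ports: chip id (num/rev/metal/bond),
 * single-port detection, flash size, shmem bases, MCP validity and
 * bootcode version (warning if older than BNX2X_BC_VER), WoL
 * capability (vn 0 only) and the board part number.
 */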
9168 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9169 {
9170         u32 val, val2, val3, val4, id;
9171         u16 pmc;
9172
9173         /* Get the chip revision id and number. */
9174         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9175         val = REG_RD(bp, MISC_REG_CHIP_NUM);
9176         id = ((val & 0xffff) << 16);
9177         val = REG_RD(bp, MISC_REG_CHIP_REV);
9178         id |= ((val & 0xf) << 12);
9179         val = REG_RD(bp, MISC_REG_CHIP_METAL);
9180         id |= ((val & 0xff) << 4);
9181         val = REG_RD(bp, MISC_REG_BOND_ID);
9182         id |= (val & 0xf);
9183         bp->common.chip_id = id;
9184         bp->link_params.chip_id = bp->common.chip_id;
9185         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9186
9187         val = (REG_RD(bp, 0x2874) & 0x55);
9188         if ((bp->common.chip_id & 0x1) ||
9189             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9190                 bp->flags |= ONE_PORT_FLAG;
9191                 BNX2X_DEV_INFO("single port device\n");
9192         }
9193
9194         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9195         bp->common.flash_size = (NVRAM_1MB_SIZE <<
9196                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
9197         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9198                        bp->common.flash_size, bp->common.flash_size);
9199
9200         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9201         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9202         bp->link_params.shmem_base = bp->common.shmem_base;
9203         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
9204                        bp->common.shmem_base, bp->common.shmem2_base);
9205
9206         if (!bp->common.shmem_base ||
9207             (bp->common.shmem_base < 0xA0000) ||
9208             (bp->common.shmem_base >= 0xC0000)) {
9209                 BNX2X_DEV_INFO("MCP not active\n");
9210                 bp->flags |= NO_MCP_FLAG;
9211                 return;
9212         }
9213
9214         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9215         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9216                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9217                 BNX2X_ERROR("BAD MCP validity signature\n");
9218
9219         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9220         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9221
9222         bp->link_params.hw_led_mode = ((bp->common.hw_config &
9223                                         SHARED_HW_CFG_LED_MODE_MASK) >>
9224                                        SHARED_HW_CFG_LED_MODE_SHIFT);
9225
9226         bp->link_params.feature_config_flags = 0;
9227         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9228         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9229                 bp->link_params.feature_config_flags |=
9230                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9231         else
9232                 bp->link_params.feature_config_flags &=
9233                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9234
9235         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9236         bp->common.bc_ver = val;
9237         BNX2X_DEV_INFO("bc_ver %X\n", val);
9238         if (val < BNX2X_BC_VER) {
9239                 /* for now only warn;
9240                  * later we might need to enforce this */
9241                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9242                             "please upgrade BC\n", BNX2X_BC_VER, val);
9243         }
9244         bp->link_params.feature_config_flags |=
9245                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9246                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9247
9248         if (BP_E1HVN(bp) == 0) {
9249                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9250                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9251         } else {
9252                 /* no WOL capability for E1HVN != 0 */
9253                 bp->flags |= NO_WOL_FLAG;
9254         }
9255         BNX2X_DEV_INFO("%sWoL capable\n",
9256                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
9257
9258         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9259         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9260         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9261         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9262
9263         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9264                  val, val2, val3, val4);
9265 }
9266
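/* Build bp->port.supported from the SerDes/XGXS external PHY type
 * found in NVRAM, read the PHY address from the NIG, and then mask
 * the result with the NVRAM speed capability mask.
 */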
9267 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9268                                                     u32 switch_cfg)
9269 {
9270         int port = BP_PORT(bp);
9271         u32 ext_phy_type;
9272
9273         switch (switch_cfg) {
9274         case SWITCH_CFG_1G:
9275                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9276
9277                 ext_phy_type =
9278                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9279                 switch (ext_phy_type) {
9280                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9281                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9282                                        ext_phy_type);
9283
9284                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9285                                                SUPPORTED_10baseT_Full |
9286                                                SUPPORTED_100baseT_Half |
9287                                                SUPPORTED_100baseT_Full |
9288                                                SUPPORTED_1000baseT_Full |
9289                                                SUPPORTED_2500baseX_Full |
9290                                                SUPPORTED_TP |
9291                                                SUPPORTED_FIBRE |
9292                                                SUPPORTED_Autoneg |
9293                                                SUPPORTED_Pause |
9294                                                SUPPORTED_Asym_Pause);
9295                         break;
9296
9297                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9298                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9299                                        ext_phy_type);
9300
9301                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9302                                                SUPPORTED_10baseT_Full |
9303                                                SUPPORTED_100baseT_Half |
9304                                                SUPPORTED_100baseT_Full |
9305                                                SUPPORTED_1000baseT_Full |
9306                                                SUPPORTED_TP |
9307                                                SUPPORTED_FIBRE |
9308                                                SUPPORTED_Autoneg |
9309                                                SUPPORTED_Pause |
9310                                                SUPPORTED_Asym_Pause);
9311                         break;
9312
9313                 default:
9314                         BNX2X_ERR("NVRAM config error. "
9315                                   "BAD SerDes ext_phy_config 0x%x\n",
9316                                   bp->link_params.ext_phy_config);
9317                         return;
9318                 }
9319
9320                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9321                                            port*0x10);
9322                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9323                 break;
9324
9325         case SWITCH_CFG_10G:
9326                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9327
9328                 ext_phy_type =
9329                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9330                 switch (ext_phy_type) {
9331                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9332                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9333                                        ext_phy_type);
9334
9335                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9336                                                SUPPORTED_10baseT_Full |
9337                                                SUPPORTED_100baseT_Half |
9338                                                SUPPORTED_100baseT_Full |
9339                                                SUPPORTED_1000baseT_Full |
9340                                                SUPPORTED_2500baseX_Full |
9341                                                SUPPORTED_10000baseT_Full |
9342                                                SUPPORTED_TP |
9343                                                SUPPORTED_FIBRE |
9344                                                SUPPORTED_Autoneg |
9345                                                SUPPORTED_Pause |
9346                                                SUPPORTED_Asym_Pause);
9347                         break;
9348
9349                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9350                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9351                                        ext_phy_type);
9352
9353                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9354                                                SUPPORTED_1000baseT_Full |
9355                                                SUPPORTED_FIBRE |
9356                                                SUPPORTED_Autoneg |
9357                                                SUPPORTED_Pause |
9358                                                SUPPORTED_Asym_Pause);
9359                         break;
9360
9361                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9362                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9363                                        ext_phy_type);
9364
9365                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9366                                                SUPPORTED_2500baseX_Full |
9367                                                SUPPORTED_1000baseT_Full |
9368                                                SUPPORTED_FIBRE |
9369                                                SUPPORTED_Autoneg |
9370                                                SUPPORTED_Pause |
9371                                                SUPPORTED_Asym_Pause);
9372                         break;
9373
9374                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9375                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9376                                        ext_phy_type);
9377
9378                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9379                                                SUPPORTED_FIBRE |
9380                                                SUPPORTED_Pause |
9381                                                SUPPORTED_Asym_Pause);
9382                         break;
9383
9384                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9385                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9386                                        ext_phy_type);
9387
9388                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9389                                                SUPPORTED_1000baseT_Full |
9390                                                SUPPORTED_FIBRE |
9391                                                SUPPORTED_Pause |
9392                                                SUPPORTED_Asym_Pause);
9393                         break;
9394
9395                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9396                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9397                                        ext_phy_type);
9398
9399                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9400                                                SUPPORTED_1000baseT_Full |
9401                                                SUPPORTED_Autoneg |
9402                                                SUPPORTED_FIBRE |
9403                                                SUPPORTED_Pause |
9404                                                SUPPORTED_Asym_Pause);
9405                         break;
9406
9407                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9408                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9409                                        ext_phy_type);
9410
9411                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9412                                                SUPPORTED_1000baseT_Full |
9413                                                SUPPORTED_Autoneg |
9414                                                SUPPORTED_FIBRE |
9415                                                SUPPORTED_Pause |
9416                                                SUPPORTED_Asym_Pause);
9417                         break;
9418
9419                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9420                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9421                                        ext_phy_type);
9422
9423                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9424                                                SUPPORTED_TP |
9425                                                SUPPORTED_Autoneg |
9426                                                SUPPORTED_Pause |
9427                                                SUPPORTED_Asym_Pause);
9428                         break;
9429
9430                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9431                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9432                                        ext_phy_type);
9433
9434                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9435                                                SUPPORTED_10baseT_Full |
9436                                                SUPPORTED_100baseT_Half |
9437                                                SUPPORTED_100baseT_Full |
9438                                                SUPPORTED_1000baseT_Full |
9439                                                SUPPORTED_10000baseT_Full |
9440                                                SUPPORTED_TP |
9441                                                SUPPORTED_Autoneg |
9442                                                SUPPORTED_Pause |
9443                                                SUPPORTED_Asym_Pause);
9444                         break;
9445
9446                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9447                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9448                                   bp->link_params.ext_phy_config);
9449                         break;
9450
9451                 default:
9452                         BNX2X_ERR("NVRAM config error. "
9453                                   "BAD XGXS ext_phy_config 0x%x\n",
9454                                   bp->link_params.ext_phy_config);
9455                         return;
9456                 }
9457
9458                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9459                                            port*0x18);
9460                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9461
9462                 break;
9463
9464         default:
9465                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9466                           bp->port.link_config);
9467                 return;
9468         }
9469         bp->link_params.phy_addr = bp->port.phy_addr;
9470
9471         /* mask what we support according to speed_cap_mask */
9472         if (!(bp->link_params.speed_cap_mask &
9473                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9474                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9475
9476         if (!(bp->link_params.speed_cap_mask &
9477                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9478                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9479
9480         if (!(bp->link_params.speed_cap_mask &
9481                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9482                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9483
9484         if (!(bp->link_params.speed_cap_mask &
9485                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9486                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9487
9488         if (!(bp->link_params.speed_cap_mask &
9489                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9490                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9491                                         SUPPORTED_1000baseT_Full);
9492
9493         if (!(bp->link_params.speed_cap_mask &
9494                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9495                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9496
9497         if (!(bp->link_params.speed_cap_mask &
9498                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9499                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9500
9501         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9502 }
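/*
 * Editorial sketch (not part of the driver, values hypothetical): how the
 * speed_cap_mask filtering above composes with the per-PHY defaults.  If
 * the PHY reports 10/100/1000/10000 support but NVRAM caps the port at 1G
 * and 10G only:
 *
 *	supported = SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
 *		    SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full;
 *	cap_mask  = PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
 *		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
 *	// each absent cap bit clears the matching SUPPORTED_* bits,
 *	// leaving SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full
 */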
9503
9504 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9505 {
9506         bp->link_params.req_duplex = DUPLEX_FULL;
9507
9508         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9509         case PORT_FEATURE_LINK_SPEED_AUTO:
9510                 if (bp->port.supported & SUPPORTED_Autoneg) {
9511                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9512                         bp->port.advertising = bp->port.supported;
9513                 } else {
9514                         u32 ext_phy_type =
9515                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9516
9517                         if ((ext_phy_type ==
9518                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9519                             (ext_phy_type ==
9520                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9521                                 /* force 10G, no AN */
9522                                 bp->link_params.req_line_speed = SPEED_10000;
9523                                 bp->port.advertising =
9524                                                 (ADVERTISED_10000baseT_Full |
9525                                                  ADVERTISED_FIBRE);
9526                                 break;
9527                         }
9528                         BNX2X_ERR("NVRAM config error. "
9529                                   "Invalid link_config 0x%x"
9530                                   "  Autoneg not supported\n",
9531                                   bp->port.link_config);
9532                         return;
9533                 }
9534                 break;
9535
9536         case PORT_FEATURE_LINK_SPEED_10M_FULL:
9537                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9538                         bp->link_params.req_line_speed = SPEED_10;
9539                         bp->port.advertising = (ADVERTISED_10baseT_Full |
9540                                                 ADVERTISED_TP);
9541                 } else {
9542                         BNX2X_ERROR("NVRAM config error. "
9543                                     "Invalid link_config 0x%x"
9544                                     "  speed_cap_mask 0x%x\n",
9545                                     bp->port.link_config,
9546                                     bp->link_params.speed_cap_mask);
9547                         return;
9548                 }
9549                 break;
9550
9551         case PORT_FEATURE_LINK_SPEED_10M_HALF:
9552                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9553                         bp->link_params.req_line_speed = SPEED_10;
9554                         bp->link_params.req_duplex = DUPLEX_HALF;
9555                         bp->port.advertising = (ADVERTISED_10baseT_Half |
9556                                                 ADVERTISED_TP);
9557                 } else {
9558                         BNX2X_ERROR("NVRAM config error. "
9559                                     "Invalid link_config 0x%x"
9560                                     "  speed_cap_mask 0x%x\n",
9561                                     bp->port.link_config,
9562                                     bp->link_params.speed_cap_mask);
9563                         return;
9564                 }
9565                 break;
9566
9567         case PORT_FEATURE_LINK_SPEED_100M_FULL:
9568                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9569                         bp->link_params.req_line_speed = SPEED_100;
9570                         bp->port.advertising = (ADVERTISED_100baseT_Full |
9571                                                 ADVERTISED_TP);
9572                 } else {
9573                         BNX2X_ERROR("NVRAM config error. "
9574                                     "Invalid link_config 0x%x"
9575                                     "  speed_cap_mask 0x%x\n",
9576                                     bp->port.link_config,
9577                                     bp->link_params.speed_cap_mask);
9578                         return;
9579                 }
9580                 break;
9581
9582         case PORT_FEATURE_LINK_SPEED_100M_HALF:
9583                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9584                         bp->link_params.req_line_speed = SPEED_100;
9585                         bp->link_params.req_duplex = DUPLEX_HALF;
9586                         bp->port.advertising = (ADVERTISED_100baseT_Half |
9587                                                 ADVERTISED_TP);
9588                 } else {
9589                         BNX2X_ERROR("NVRAM config error. "
9590                                     "Invalid link_config 0x%x"
9591                                     "  speed_cap_mask 0x%x\n",
9592                                     bp->port.link_config,
9593                                     bp->link_params.speed_cap_mask);
9594                         return;
9595                 }
9596                 break;
9597
9598         case PORT_FEATURE_LINK_SPEED_1G:
9599                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9600                         bp->link_params.req_line_speed = SPEED_1000;
9601                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
9602                                                 ADVERTISED_TP);
9603                 } else {
9604                         BNX2X_ERROR("NVRAM config error. "
9605                                     "Invalid link_config 0x%x"
9606                                     "  speed_cap_mask 0x%x\n",
9607                                     bp->port.link_config,
9608                                     bp->link_params.speed_cap_mask);
9609                         return;
9610                 }
9611                 break;
9612
9613         case PORT_FEATURE_LINK_SPEED_2_5G:
9614                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9615                         bp->link_params.req_line_speed = SPEED_2500;
9616                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
9617                                                 ADVERTISED_TP);
9618                 } else {
9619                         BNX2X_ERROR("NVRAM config error. "
9620                                     "Invalid link_config 0x%x"
9621                                     "  speed_cap_mask 0x%x\n",
9622                                     bp->port.link_config,
9623                                     bp->link_params.speed_cap_mask);
9624                         return;
9625                 }
9626                 break;
9627
9628         case PORT_FEATURE_LINK_SPEED_10G_CX4:
9629         case PORT_FEATURE_LINK_SPEED_10G_KX4:
9630         case PORT_FEATURE_LINK_SPEED_10G_KR:
9631                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9632                         bp->link_params.req_line_speed = SPEED_10000;
9633                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
9634                                                 ADVERTISED_FIBRE);
9635                 } else {
9636                         BNX2X_ERROR("NVRAM config error. "
9637                                     "Invalid link_config 0x%x"
9638                                     "  speed_cap_mask 0x%x\n",
9639                                     bp->port.link_config,
9640                                     bp->link_params.speed_cap_mask);
9641                         return;
9642                 }
9643                 break;
9644
9645         default:
9646                 BNX2X_ERROR("NVRAM config error. "
9647                             "BAD link speed link_config 0x%x\n",
9648                             bp->port.link_config);
9649                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9650                 bp->port.advertising = bp->port.supported;
9651                 break;
9652         }
9653
9654         bp->link_params.req_flow_ctrl = (bp->port.link_config &
9655                                          PORT_FEATURE_FLOW_CONTROL_MASK);
9656         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9657             !(bp->port.supported & SUPPORTED_Autoneg))
9658                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9659
9660         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
9661                        "  advertising 0x%x\n",
9662                        bp->link_params.req_line_speed,
9663                        bp->link_params.req_duplex,
9664                        bp->link_params.req_flow_ctrl, bp->port.advertising);
9665 }
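/*
 * Editorial example (hypothetical NVRAM contents): a link_config whose
 * speed field is PORT_FEATURE_LINK_SPEED_1G, on a port whose filtered
 * supported mask still contains SUPPORTED_1000baseT_Full, resolves to
 *
 *	bp->link_params.req_line_speed = SPEED_1000;
 *	bp->port.advertising = ADVERTISED_1000baseT_Full | ADVERTISED_TP;
 *
 * whereas the same field on a port capped below 1G is reported as an
 * NVRAM config error and the function returns early.
 */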
9666
9667 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9668 {
9669         mac_hi = cpu_to_be16(mac_hi);
9670         mac_lo = cpu_to_be32(mac_lo);
9671         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9672         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9673 }
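/*
 * Editorial example (hypothetical values): the MAC address lives in shmem
 * as a 16-bit "upper" word and a 32-bit "lower" word.
 *
 *	u8 buf[6];
 *	bnx2x_set_mac_buf(buf, 0xc3d4e5f6, 0xa1b2);
 *	// cpu_to_be16/cpu_to_be32 put both words into network byte
 *	// order, so buf holds a1:b2:c3:d4:e5:f6 regardless of host
 *	// endianness.
 */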
9674
9675 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9676 {
9677         int port = BP_PORT(bp);
9678         u32 val, val2;
9679         u32 config;
9680         u16 i;
9681         u32 ext_phy_type;
9682
9683         bp->link_params.bp = bp;
9684         bp->link_params.port = port;
9685
9686         bp->link_params.lane_config =
9687                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9688         bp->link_params.ext_phy_config =
9689                 SHMEM_RD(bp,
9690                          dev_info.port_hw_config[port].external_phy_config);
9691         /* BCM8727_NOC => BCM8727, no over-current */
9692         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9693             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9694                 bp->link_params.ext_phy_config &=
9695                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9696                 bp->link_params.ext_phy_config |=
9697                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9698                 bp->link_params.feature_config_flags |=
9699                         FEATURE_CONFIG_BCM8727_NOC;
9700         }
9701
9702         bp->link_params.speed_cap_mask =
9703                 SHMEM_RD(bp,
9704                          dev_info.port_hw_config[port].speed_capability_mask);
9705
9706         bp->port.link_config =
9707                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9708
9709         /* Get the 4 lanes xgxs config rx and tx */
9710         for (i = 0; i < 2; i++) {
9711                 val = SHMEM_RD(bp,
9712                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9713                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9714                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9715
9716                 val = SHMEM_RD(bp,
9717                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9718                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9719                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9720         }
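        /* Editorial note: each 32-bit shmem word above packs two 16-bit
         * lane values, high half first; e.g. (hypothetically)
         * val == 0x12345678 yields lane 2i = 0x1234 and lane 2i+1 = 0x5678.
         */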
9721
9722         /* If the device is capable of WoL, set the default state according
9723          * to the HW
9724          */
9725         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9726         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9727                    (config & PORT_FEATURE_WOL_ENABLED));
9728
9729         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
9730                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
9731                        bp->link_params.lane_config,
9732                        bp->link_params.ext_phy_config,
9733                        bp->link_params.speed_cap_mask, bp->port.link_config);
9734
9735         bp->link_params.switch_cfg |= (bp->port.link_config &
9736                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
9737         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9738
9739         bnx2x_link_settings_requested(bp);
9740
9741         /*
9742          * If connected directly, work with the internal PHY, otherwise, work
9743          * with the external PHY
9744          */
9745         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9746         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9747                 bp->mdio.prtad = bp->link_params.phy_addr;
9748
9749         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9750                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9751                 bp->mdio.prtad =
9752                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9753
9754         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9755         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9756         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9757         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9758         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9759
9760 #ifdef BCM_CNIC
9761         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9762         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9763         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9764 #endif
9765 }
9766
9767 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9768 {
9769         int func = BP_FUNC(bp);
9770         u32 val, val2;
9771         int rc = 0;
9772
9773         bnx2x_get_common_hwinfo(bp);
9774
9775         bp->e1hov = 0;
9776         bp->e1hmf = 0;
9777         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9778                 bp->mf_config =
9779                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9780
9781                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9782                        FUNC_MF_CFG_E1HOV_TAG_MASK);
9783                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9784                         bp->e1hmf = 1;
9785                 BNX2X_DEV_INFO("%s function mode\n",
9786                                IS_E1HMF(bp) ? "multi" : "single");
9787
9788                 if (IS_E1HMF(bp)) {
9789                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9790                                                                 e1hov_tag) &
9791                                FUNC_MF_CFG_E1HOV_TAG_MASK);
9792                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9793                                 bp->e1hov = val;
9794                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9795                                                "(0x%04x)\n",
9796                                                func, bp->e1hov, bp->e1hov);
9797                         } else {
9798                                 BNX2X_ERROR("No valid E1HOV for func %d,"
9799                                             "  aborting\n", func);
9800                                 rc = -EPERM;
9801                         }
9802                 } else {
9803                         if (BP_E1HVN(bp)) {
9804                                 BNX2X_ERROR("VN %d in single function mode,"
9805                                             "  aborting\n", BP_E1HVN(bp));
9806                                 rc = -EPERM;
9807                         }
9808                 }
9809         }
9810
9811         if (!BP_NOMCP(bp)) {
9812                 bnx2x_get_port_hwinfo(bp);
9813
9814                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9815                               DRV_MSG_SEQ_NUMBER_MASK);
9816                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9817         }
9818
9819         if (IS_E1HMF(bp)) {
9820                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9821                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
9822                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9823                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9824                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9825                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9826                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9827                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9828                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
9829                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
9830                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9831                                ETH_ALEN);
9832                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9833                                ETH_ALEN);
9834                 }
9835
9836                 return rc;
9837         }
9838
9839         if (BP_NOMCP(bp)) {
9840                 /* only supposed to happen on emulation/FPGA */
9841                 BNX2X_ERROR("warning: random MAC workaround active\n");
9842                 random_ether_addr(bp->dev->dev_addr);
9843                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9844         }
9845
9846         return rc;
9847 }
9848
9849 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9850 {
9851         int cnt, i, block_end, rodi;
9852         char vpd_data[BNX2X_VPD_LEN+1];
9853         char str_id_reg[VENDOR_ID_LEN+1];
9854         char str_id_cap[VENDOR_ID_LEN+1];
9855         u8 len;
9856
9857         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9858         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9859
9860         if (cnt < BNX2X_VPD_LEN)
9861                 goto out_not_found;
9862
9863         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9864                              PCI_VPD_LRDT_RO_DATA);
9865         if (i < 0)
9866                 goto out_not_found;
9867
9868
9869         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9870                     pci_vpd_lrdt_size(&vpd_data[i]);
9871
9872         i += PCI_VPD_LRDT_TAG_SIZE;
9873
9874         if (block_end > BNX2X_VPD_LEN)
9875                 goto out_not_found;
9876
9877         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9878                                    PCI_VPD_RO_KEYWORD_MFR_ID);
9879         if (rodi < 0)
9880                 goto out_not_found;
9881
9882         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9883
9884         if (len != VENDOR_ID_LEN)
9885                 goto out_not_found;
9886
9887         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9888
9889         /* vendor specific info */
9890         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9891         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9892         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9893             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9894
9895                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9896                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
9897                 if (rodi >= 0) {
9898                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9899
9900                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9901
9902                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9903                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9904                                 bp->fw_ver[len] = ' ';
9905                         }
9906                 }
9907                 return;
9908         }
9909 out_not_found:
9910         return;
9911 }
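/*
 * Editorial sketch of the VPD walk above (layout per the PCI spec, byte
 * values hypothetical).  A read-only VPD block starts with a large-resource
 * tag (PCI_VPD_LRDT_RO_DATA, 0x90) and a 16-bit little-endian length,
 * followed by keyword fields of 2 keyword bytes plus 1 length byte:
 *
 *	90 0b 00                    // RO tag, 11 bytes of payload
 *	4d 4e 04 31 30 32 38        // "MN", len 4, "1028" (mfr id)
 *	56 30 01 58                 // "V0", len 1, vendor data
 *
 * pci_vpd_find_tag() locates the 0x90 tag, pci_vpd_find_info_keyword()
 * scans the payload for "MN" and "V0", and the code copies the V0 field
 * into bp->fw_ver when the manufacturer id matches PCI_VENDOR_ID_DELL.
 */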
9912
9913 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9914 {
9915         int func = BP_FUNC(bp);
9916         int timer_interval;
9917         int rc;
9918
9919         /* Disable interrupt handling until HW is initialized */
9920         atomic_set(&bp->intr_sem, 1);
9921         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9922
9923         mutex_init(&bp->port.phy_mutex);
9924         mutex_init(&bp->fw_mb_mutex);
9925 #ifdef BCM_CNIC
9926         mutex_init(&bp->cnic_mutex);
9927 #endif
9928
9929         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9930         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9931
9932         rc = bnx2x_get_hwinfo(bp);
9933
9934         bnx2x_read_fwinfo(bp);
9935         /* need to reset chip if undi was active */
9936         if (!BP_NOMCP(bp))
9937                 bnx2x_undi_unload(bp);
9938
9939         if (CHIP_REV_IS_FPGA(bp))
9940                 dev_err(&bp->pdev->dev, "FPGA detected\n");
9941
9942         if (BP_NOMCP(bp) && (func == 0))
9943                 dev_err(&bp->pdev->dev, "MCP disabled, "
9944                                         "must load devices in order!\n");
9945
9946         /* Set multi queue mode */
9947         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9948             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9949                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9950                                         "requested is not MSI-X\n");
9951                 multi_mode = ETH_RSS_MODE_DISABLED;
9952         }
9953         bp->multi_mode = multi_mode;
9954
9955
9956         bp->dev->features |= NETIF_F_GRO;
9957
9958         /* Set TPA flags */
9959         if (disable_tpa) {
9960                 bp->flags &= ~TPA_ENABLE_FLAG;
9961                 bp->dev->features &= ~NETIF_F_LRO;
9962         } else {
9963                 bp->flags |= TPA_ENABLE_FLAG;
9964                 bp->dev->features |= NETIF_F_LRO;
9965         }
9966
9967         if (CHIP_IS_E1(bp))
9968                 bp->dropless_fc = 0;
9969         else
9970                 bp->dropless_fc = dropless_fc;
9971
9972         bp->mrrs = mrrs;
9973
9974         bp->tx_ring_size = MAX_TX_AVAIL;
9975         bp->rx_ring_size = MAX_RX_AVAIL;
9976
9977         bp->rx_csum = 1;
9978
9979         /* make sure that the numbers have the right granularity */
9980         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9981         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9982
9983         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9984         bp->current_interval = (poll ? poll : timer_interval);
9985
9986         init_timer(&bp->timer);
9987         bp->timer.expires = jiffies + bp->current_interval;
9988         bp->timer.data = (unsigned long) bp;
9989         bp->timer.function = bnx2x_timer;
9990
9991         return rc;
9992 }
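/*
 * Editorial note on the tx_ticks/rx_ticks expressions above: the integer
 * division rounds the value down to a multiple of 4 * BNX2X_BTR.  If
 * BNX2X_BTR were 3 (hypothetical), then
 *
 *	tx_ticks = (50 / 12) * 12 = 48
 *	rx_ticks = (25 / 12) * 12 = 24
 *
 * i.e. the requested 50/25 usec defaults get snapped to the coalescing
 * timer's resolution.
 */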
9993
9994 /*
9995  * ethtool service functions
9996  */
9997
9998 /* All ethtool functions called with rtnl_lock */
9999
10000 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10001 {
10002         struct bnx2x *bp = netdev_priv(dev);
10003
10004         cmd->supported = bp->port.supported;
10005         cmd->advertising = bp->port.advertising;
10006
10007         if ((bp->state == BNX2X_STATE_OPEN) &&
10008             !(bp->flags & MF_FUNC_DIS) &&
10009             (bp->link_vars.link_up)) {
10010                 cmd->speed = bp->link_vars.line_speed;
10011                 cmd->duplex = bp->link_vars.duplex;
10012                 if (IS_E1HMF(bp)) {
10013                         u16 vn_max_rate;
10014
10015                         vn_max_rate =
10016                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10017                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10018                         if (vn_max_rate < cmd->speed)
10019                                 cmd->speed = vn_max_rate;
10020                 }
10021         } else {
10022                 cmd->speed = -1;
10023                 cmd->duplex = -1;
10024         }
10025
10026         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10027                 u32 ext_phy_type =
10028                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10029
10030                 switch (ext_phy_type) {
10031                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10032                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10033                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10034                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10035                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10036                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10037                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10038                         cmd->port = PORT_FIBRE;
10039                         break;
10040
10041                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10042                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10043                         cmd->port = PORT_TP;
10044                         break;
10045
10046                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10047                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10048                                   bp->link_params.ext_phy_config);
10049                         break;
10050
10051                 default:
10052                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10053                            bp->link_params.ext_phy_config);
10054                         break;
10055                 }
10056         } else
10057                 cmd->port = PORT_TP;
10058
10059         cmd->phy_address = bp->mdio.prtad;
10060         cmd->transceiver = XCVR_INTERNAL;
10061
10062         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10063                 cmd->autoneg = AUTONEG_ENABLE;
10064         else
10065                 cmd->autoneg = AUTONEG_DISABLE;
10066
10067         cmd->maxtxpkt = 0;
10068         cmd->maxrxpkt = 0;
10069
10070         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10071            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10072            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10073            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10074            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10075            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10076            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10077
10078         return 0;
10079 }
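/*
 * Editorial example for the E1H multi-function clamp above (register
 * value hypothetical): the MAX_BW field of mf_config encodes the per-VN
 * ceiling in units of 100 Mbps, hence the " * 100".  If the field holds
 * 25:
 *
 *	vn_max_rate = 25 * 100 = 2500;
 *	// a 10000 Mbps physical link is reported to ethtool as 2500
 */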
10080
10081 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10082 {
10083         struct bnx2x *bp = netdev_priv(dev);
10084         u32 advertising;
10085
10086         if (IS_E1HMF(bp))
10087                 return 0;
10088
10089         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10090            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10091            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10092            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10093            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10094            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10095            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10096
10097         if (cmd->autoneg == AUTONEG_ENABLE) {
10098                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10099                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10100                         return -EINVAL;
10101                 }
10102
10103                 /* advertise the requested speed and duplex if supported */
10104                 cmd->advertising &= bp->port.supported;
10105
10106                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10107                 bp->link_params.req_duplex = DUPLEX_FULL;
10108                 bp->port.advertising |= (ADVERTISED_Autoneg |
10109                                          cmd->advertising);
10110
10111         } else { /* forced speed */
10112                 /* advertise the requested speed and duplex if supported */
10113                 switch (cmd->speed) {
10114                 case SPEED_10:
10115                         if (cmd->duplex == DUPLEX_FULL) {
10116                                 if (!(bp->port.supported &
10117                                       SUPPORTED_10baseT_Full)) {
10118                                         DP(NETIF_MSG_LINK,
10119                                            "10M full not supported\n");
10120                                         return -EINVAL;
10121                                 }
10122
10123                                 advertising = (ADVERTISED_10baseT_Full |
10124                                                ADVERTISED_TP);
10125                         } else {
10126                                 if (!(bp->port.supported &
10127                                       SUPPORTED_10baseT_Half)) {
10128                                         DP(NETIF_MSG_LINK,
10129                                            "10M half not supported\n");
10130                                         return -EINVAL;
10131                                 }
10132
10133                                 advertising = (ADVERTISED_10baseT_Half |
10134                                                ADVERTISED_TP);
10135                         }
10136                         break;
10137
10138                 case SPEED_100:
10139                         if (cmd->duplex == DUPLEX_FULL) {
10140                                 if (!(bp->port.supported &
10141                                                 SUPPORTED_100baseT_Full)) {
10142                                         DP(NETIF_MSG_LINK,
10143                                            "100M full not supported\n");
10144                                         return -EINVAL;
10145                                 }
10146
10147                                 advertising = (ADVERTISED_100baseT_Full |
10148                                                ADVERTISED_TP);
10149                         } else {
10150                                 if (!(bp->port.supported &
10151                                                 SUPPORTED_100baseT_Half)) {
10152                                         DP(NETIF_MSG_LINK,
10153                                            "100M half not supported\n");
10154                                         return -EINVAL;
10155                                 }
10156
10157                                 advertising = (ADVERTISED_100baseT_Half |
10158                                                ADVERTISED_TP);
10159                         }
10160                         break;
10161
10162                 case SPEED_1000:
10163                         if (cmd->duplex != DUPLEX_FULL) {
10164                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
10165                                 return -EINVAL;
10166                         }
10167
10168                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10169                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
10170                                 return -EINVAL;
10171                         }
10172
10173                         advertising = (ADVERTISED_1000baseT_Full |
10174                                        ADVERTISED_TP);
10175                         break;
10176
10177                 case SPEED_2500:
10178                         if (cmd->duplex != DUPLEX_FULL) {
10179                                 DP(NETIF_MSG_LINK,
10180                                    "2.5G half not supported\n");
10181                                 return -EINVAL;
10182                         }
10183
10184                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10185                                 DP(NETIF_MSG_LINK,
10186                                    "2.5G full not supported\n");
10187                                 return -EINVAL;
10188                         }
10189
10190                         advertising = (ADVERTISED_2500baseX_Full |
10191                                        ADVERTISED_TP);
10192                         break;
10193
10194                 case SPEED_10000:
10195                         if (cmd->duplex != DUPLEX_FULL) {
10196                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
10197                                 return -EINVAL;
10198                         }
10199
10200                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10201                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
10202                                 return -EINVAL;
10203                         }
10204
10205                         advertising = (ADVERTISED_10000baseT_Full |
10206                                        ADVERTISED_FIBRE);
10207                         break;
10208
10209                 default:
10210                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
10211                         return -EINVAL;
10212                 }
10213
10214                 bp->link_params.req_line_speed = cmd->speed;
10215                 bp->link_params.req_duplex = cmd->duplex;
10216                 bp->port.advertising = advertising;
10217         }
10218
10219         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10220            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
10221            bp->link_params.req_line_speed, bp->link_params.req_duplex,
10222            bp->port.advertising);
10223
10224         if (netif_running(dev)) {
10225                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10226                 bnx2x_link_set(bp);
10227         }
10228
10229         return 0;
10230 }
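/*
 * Editorial usage example (interface name hypothetical): the forced-speed
 * branch above is what runs for
 *
 *	ethtool -s eth0 speed 100 duplex half autoneg off
 *
 * which lands in the SPEED_100/DUPLEX_HALF case, while "autoneg on" takes
 * the AUTONEG_ENABLE path and advertises the requested modes masked by
 * bp->port.supported.
 */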
10231
10232 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10233 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10234
10235 static int bnx2x_get_regs_len(struct net_device *dev)
10236 {
10237         struct bnx2x *bp = netdev_priv(dev);
10238         int regdump_len = 0;
10239         int i;
10240
10241         if (CHIP_IS_E1(bp)) {
10242                 for (i = 0; i < REGS_COUNT; i++)
10243                         if (IS_E1_ONLINE(reg_addrs[i].info))
10244                                 regdump_len += reg_addrs[i].size;
10245
10246                 for (i = 0; i < WREGS_COUNT_E1; i++)
10247                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10248                                 regdump_len += wreg_addrs_e1[i].size *
10249                                         (1 + wreg_addrs_e1[i].read_regs_count);
10250
10251         } else { /* E1H */
10252                 for (i = 0; i < REGS_COUNT; i++)
10253                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10254                                 regdump_len += reg_addrs[i].size;
10255
10256                 for (i = 0; i < WREGS_COUNT_E1H; i++)
10257                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10258                                 regdump_len += wreg_addrs_e1h[i].size *
10259                                         (1 + wreg_addrs_e1h[i].read_regs_count);
10260         }
10261         regdump_len *= 4;
10262         regdump_len += sizeof(struct dump_hdr);
10263
10264         return regdump_len;
10265 }
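/*
 * Editorial example of the length math above (all counts hypothetical):
 * with two online register blocks of 16 and 8 dwords plus one wide-bus
 * entry of size 2 with read_regs_count 3,
 *
 *	regdump_len = 16 + 8 + 2 * (1 + 3) = 32 dwords
 *	            = 32 * 4 + sizeof(struct dump_hdr) bytes
 */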
10266
10267 static void bnx2x_get_regs(struct net_device *dev,
10268                            struct ethtool_regs *regs, void *_p)
10269 {
10270         u32 *p = _p, i, j;
10271         struct bnx2x *bp = netdev_priv(dev);
10272         struct dump_hdr dump_hdr = {0};
10273
10274         regs->version = 0;
10275         memset(p, 0, regs->len);
10276
10277         if (!netif_running(bp->dev))
10278                 return;
10279
10280         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10281         dump_hdr.dump_sign = dump_sign_all;
10282         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10283         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10284         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10285         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10286         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10287
10288         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10289         p += dump_hdr.hdr_size + 1;
10290
10291         if (CHIP_IS_E1(bp)) {
10292                 for (i = 0; i < REGS_COUNT; i++)
10293                         if (IS_E1_ONLINE(reg_addrs[i].info))
10294                                 for (j = 0; j < reg_addrs[i].size; j++)
10295                                         *p++ = REG_RD(bp,
10296                                                       reg_addrs[i].addr + j*4);
10297
10298         } else { /* E1H */
10299                 for (i = 0; i < REGS_COUNT; i++)
10300                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10301                                 for (j = 0; j < reg_addrs[i].size; j++)
10302                                         *p++ = REG_RD(bp,
10303                                                       reg_addrs[i].addr + j*4);
10304         }
10305 }
10306
10307 #define PHY_FW_VER_LEN                  10
10308
10309 static void bnx2x_get_drvinfo(struct net_device *dev,
10310                               struct ethtool_drvinfo *info)
10311 {
10312         struct bnx2x *bp = netdev_priv(dev);
10313         u8 phy_fw_ver[PHY_FW_VER_LEN];
10314
10315         strcpy(info->driver, DRV_MODULE_NAME);
10316         strcpy(info->version, DRV_MODULE_VERSION);
10317
10318         phy_fw_ver[0] = '\0';
10319         if (bp->port.pmf) {
10320                 bnx2x_acquire_phy_lock(bp);
10321                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10322                                              (bp->state != BNX2X_STATE_CLOSED),
10323                                              phy_fw_ver, PHY_FW_VER_LEN);
10324                 bnx2x_release_phy_lock(bp);
10325         }
10326
10327         strncpy(info->fw_version, bp->fw_ver, 32);
10328         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10329                  "bc %d.%d.%d%s%s",
10330                  (bp->common.bc_ver & 0xff0000) >> 16,
10331                  (bp->common.bc_ver & 0xff00) >> 8,
10332                  (bp->common.bc_ver & 0xff),
10333                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10334         strcpy(info->bus_info, pci_name(bp->pdev));
10335         info->n_stats = BNX2X_NUM_STATS;
10336         info->testinfo_len = BNX2X_NUM_TESTS;
10337         info->eedump_len = bp->common.flash_size;
10338         info->regdump_len = bnx2x_get_regs_len(dev);
10339 }
10340
10341 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10342 {
10343         struct bnx2x *bp = netdev_priv(dev);
10344
10345         if (bp->flags & NO_WOL_FLAG) {
10346                 wol->supported = 0;
10347                 wol->wolopts = 0;
10348         } else {
10349                 wol->supported = WAKE_MAGIC;
10350                 if (bp->wol)
10351                         wol->wolopts = WAKE_MAGIC;
10352                 else
10353                         wol->wolopts = 0;
10354         }
10355         memset(&wol->sopass, 0, sizeof(wol->sopass));
10356 }
10357
10358 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10359 {
10360         struct bnx2x *bp = netdev_priv(dev);
10361
10362         if (wol->wolopts & ~WAKE_MAGIC)
10363                 return -EINVAL;
10364
10365         if (wol->wolopts & WAKE_MAGIC) {
10366                 if (bp->flags & NO_WOL_FLAG)
10367                         return -EINVAL;
10368
10369                 bp->wol = 1;
10370         } else
10371                 bp->wol = 0;
10372
10373         return 0;
10374 }
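/*
 * Editorial usage example (interface name hypothetical): only magic-packet
 * wake is supported, so
 *
 *	ethtool -s eth0 wol g	// sets bp->wol = 1
 *	ethtool -s eth0 wol d	// sets bp->wol = 0
 *
 * and any other wake flag is rejected with -EINVAL.
 */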
10375
10376 static u32 bnx2x_get_msglevel(struct net_device *dev)
10377 {
10378         struct bnx2x *bp = netdev_priv(dev);
10379
10380         return bp->msg_enable;
10381 }
10382
10383 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10384 {
10385         struct bnx2x *bp = netdev_priv(dev);
10386
10387         if (capable(CAP_NET_ADMIN))
10388                 bp->msg_enable = level;
10389 }
10390
10391 static int bnx2x_nway_reset(struct net_device *dev)
10392 {
10393         struct bnx2x *bp = netdev_priv(dev);
10394
10395         if (!bp->port.pmf)
10396                 return 0;
10397
10398         if (netif_running(dev)) {
10399                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10400                 bnx2x_link_set(bp);
10401         }
10402
10403         return 0;
10404 }
10405
10406 static u32 bnx2x_get_link(struct net_device *dev)
10407 {
10408         struct bnx2x *bp = netdev_priv(dev);
10409
10410         if (bp->flags & MF_FUNC_DIS)
10411                 return 0;
10412
10413         return bp->link_vars.link_up;
10414 }
10415
10416 static int bnx2x_get_eeprom_len(struct net_device *dev)
10417 {
10418         struct bnx2x *bp = netdev_priv(dev);
10419
10420         return bp->common.flash_size;
10421 }
10422
10423 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10424 {
10425         int port = BP_PORT(bp);
10426         int count, i;
10427         u32 val = 0;
10428
10429         /* adjust timeout for emulation/FPGA */
10430         count = NVRAM_TIMEOUT_COUNT;
10431         if (CHIP_REV_IS_SLOW(bp))
10432                 count *= 100;
10433
10434         /* request access to nvram interface */
10435         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10436                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10437
10438         for (i = 0; i < count*10; i++) {
10439                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10440                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10441                         break;
10442
10443                 udelay(5);
10444         }
10445
10446         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10447                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10448                 return -EBUSY;
10449         }
10450
10451         return 0;
10452 }
10453
10454 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10455 {
10456         int port = BP_PORT(bp);
10457         int count, i;
10458         u32 val = 0;
10459
10460         /* adjust timeout for emulation/FPGA */
10461         count = NVRAM_TIMEOUT_COUNT;
10462         if (CHIP_REV_IS_SLOW(bp))
10463                 count *= 100;
10464
10465         /* relinquish nvram interface */
10466         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10467                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10468
10469         for (i = 0; i < count*10; i++) {
10470                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10471                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10472                         break;
10473
10474                 udelay(5);
10475         }
10476
10477         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10478                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10479                 return -EBUSY;
10480         }
10481
10482         return 0;
10483 }
10484
10485 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10486 {
10487         u32 val;
10488
10489         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10490
10491         /* enable both bits, even on read */
10492         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10493                (val | MCPR_NVM_ACCESS_ENABLE_EN |
10494                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
10495 }
10496
10497 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10498 {
10499         u32 val;
10500
10501         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10502
10503         /* disable both bits, even after read */
10504         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10505                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10506                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10507 }
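/*
 * Editorial sketch (hypothetical helper, not in the driver): every NVRAM
 * accessor below brackets its work with the same four calls:
 *
 *	static int bnx2x_nvram_op(struct bnx2x *bp)
 *	{
 *		int rc = bnx2x_acquire_nvram_lock(bp);
 *		if (rc)
 *			return rc;
 *		bnx2x_enable_nvram_access(bp);
 *		// ... issue dword read/write commands ...
 *		bnx2x_disable_nvram_access(bp);
 *		bnx2x_release_nvram_lock(bp);
 *		return rc;
 *	}
 *
 * The SW arbitration lock serializes the two ports; the access-enable
 * bits open the NVRAM interface for reads and writes.
 */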
10508
10509 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10510                                   u32 cmd_flags)
10511 {
10512         int count, i, rc;
10513         u32 val;
10514
10515         /* build the command word */
10516         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10517
10518         /* need to clear DONE bit separately */
10519         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10520
10521         /* address of the NVRAM to read from */
10522         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10523                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10524
10525         /* issue a read command */
10526         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10527
10528         /* adjust timeout for emulation/FPGA */
10529         count = NVRAM_TIMEOUT_COUNT;
10530         if (CHIP_REV_IS_SLOW(bp))
10531                 count *= 100;
10532
10533         /* wait for completion */
10534         *ret_val = 0;
10535         rc = -EBUSY;
10536         for (i = 0; i < count; i++) {
10537                 udelay(5);
10538                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10539
10540                 if (val & MCPR_NVM_COMMAND_DONE) {
10541                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10542                         /* we read nvram data in cpu order,
10543                          * but ethtool sees it as an array of bytes;
10544                          * converting to big-endian does the work */
10545                         *ret_val = cpu_to_be32(val);
10546                         rc = 0;
10547                         break;
10548                 }
10549         }
10550
10551         return rc;
10552 }
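/*
 * Editorial example of the byte-order conversion above (value
 * hypothetical): if REG_RD() returns 0x12345678, cpu_to_be32() stores it
 * as the bytes 12 34 56 78 in memory on any host, which is exactly the
 * flat byte array ethtool hands back to user space.
 */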
10553
10554 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10555                             int buf_size)
10556 {
10557         int rc;
10558         u32 cmd_flags;
10559         __be32 val;
10560
10561         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10562                 DP(BNX2X_MSG_NVM,
10563                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10564                    offset, buf_size);
10565                 return -EINVAL;
10566         }
10567
10568         if (offset + buf_size > bp->common.flash_size) {
10569                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10570                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10571                    offset, buf_size, bp->common.flash_size);
10572                 return -EINVAL;
10573         }
10574
10575         /* request access to nvram interface */
10576         rc = bnx2x_acquire_nvram_lock(bp);
10577         if (rc)
10578                 return rc;
10579
10580         /* enable access to nvram interface */
10581         bnx2x_enable_nvram_access(bp);
10582
10583         /* read the first word(s) */
10584         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10585         while ((buf_size > sizeof(u32)) && (rc == 0)) {
10586                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10587                 memcpy(ret_buf, &val, 4);
10588
10589                 /* advance to the next dword */
10590                 offset += sizeof(u32);
10591                 ret_buf += sizeof(u32);
10592                 buf_size -= sizeof(u32);
10593                 cmd_flags = 0;
10594         }
10595
10596         if (rc == 0) {
10597                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10598                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10599                 memcpy(ret_buf, &val, 4);
10600         }
10601
10602         /* disable access to nvram interface */
10603         bnx2x_disable_nvram_access(bp);
10604         bnx2x_release_nvram_lock(bp);
10605
10606         return rc;
10607 }
10608
10609 static int bnx2x_get_eeprom(struct net_device *dev,
10610                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10611 {
10612         struct bnx2x *bp = netdev_priv(dev);
10613         int rc;
10614
10615         if (!netif_running(dev))
10616                 return -EAGAIN;
10617
10618         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10619            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10620            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10621            eeprom->len, eeprom->len);
10622
10623         /* parameters already validated in ethtool_get_eeprom */
10624
10625         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10626
10627         return rc;
10628 }
10629
10630 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10631                                    u32 cmd_flags)
10632 {
10633         int count, i, rc;
10634
10635         /* build the command word */
10636         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10637
10638         /* need to clear DONE bit separately */
10639         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10640
10641         /* write the data */
10642         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10643
10644         /* address of the NVRAM to write to */
10645         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10646                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10647
10648         /* issue the write command */
10649         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10650
10651         /* adjust timeout for emulation/FPGA */
10652         count = NVRAM_TIMEOUT_COUNT;
10653         if (CHIP_REV_IS_SLOW(bp))
10654                 count *= 100;
10655
10656         /* wait for completion */
10657         rc = -EBUSY;
10658         for (i = 0; i < count; i++) {
10659                 udelay(5);
10660                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10661                 if (val & MCPR_NVM_COMMAND_DONE) {
10662                         rc = 0;
10663                         break;
10664                 }
10665         }
10666
10667         return rc;
10668 }
10669
10670 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
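/* Editorial example: BYTE_OFFSET(0x12) = 8 * (0x12 & 0x03) = 16, i.e. a
 * byte at NVRAM offset 0x12 is patched into bits 23:16 of the aligned
 * dword at offset 0x10.
 */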
10671
10672 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10673                               int buf_size)
10674 {
10675         int rc;
10676         u32 cmd_flags;
10677         u32 align_offset;
10678         __be32 val;
10679
10680         if (offset + buf_size > bp->common.flash_size) {
10681                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10682                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10683                    offset, buf_size, bp->common.flash_size);
10684                 return -EINVAL;
10685         }
10686
10687         /* request access to nvram interface */
10688         rc = bnx2x_acquire_nvram_lock(bp);
10689         if (rc)
10690                 return rc;
10691
10692         /* enable access to nvram interface */
10693         bnx2x_enable_nvram_access(bp);
10694
10695         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10696         align_offset = (offset & ~0x03);
10697         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10698
10699         if (rc == 0) {
10700                 val &= ~(0xff << BYTE_OFFSET(offset));
10701                 val |= (*data_buf << BYTE_OFFSET(offset));
10702
10703                 /* nvram data is returned as an array of bytes;
10704                  * convert it back to cpu order */
10705                 val = be32_to_cpu(val);
10706
10707                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10708                                              cmd_flags);
10709         }
10710
10711         /* disable access to nvram interface */
10712         bnx2x_disable_nvram_access(bp);
10713         bnx2x_release_nvram_lock(bp);
10714
10715         return rc;
10716 }
10717
10718 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10719                              int buf_size)
10720 {
10721         int rc;
10722         u32 cmd_flags;
10723         u32 val;
10724         u32 written_so_far;
10725
10726         if (buf_size == 1)      /* ethtool */
10727                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10728
10729         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10730                 DP(BNX2X_MSG_NVM,
10731                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10732                    offset, buf_size);
10733                 return -EINVAL;
10734         }
10735
10736         if (offset + buf_size > bp->common.flash_size) {
10737                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10738                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10739                    offset, buf_size, bp->common.flash_size);
10740                 return -EINVAL;
10741         }
10742
10743         /* request access to nvram interface */
10744         rc = bnx2x_acquire_nvram_lock(bp);
10745         if (rc)
10746                 return rc;
10747
10748         /* enable access to nvram interface */
10749         bnx2x_enable_nvram_access(bp);
10750
10751         written_so_far = 0;
10752         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10753         while ((written_so_far < buf_size) && (rc == 0)) {
10754                 if (written_so_far == (buf_size - sizeof(u32)))
10755                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10756                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10757                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10758                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10759                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10760
10761                 memcpy(&val, data_buf, 4);
10762
10763                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10764
10765                 /* advance to the next dword */
10766                 offset += sizeof(u32);
10767                 data_buf += sizeof(u32);
10768                 written_so_far += sizeof(u32);
10769                 cmd_flags = 0;
10770         }
10771
10772         /* disable access to nvram interface */
10773         bnx2x_disable_nvram_access(bp);
10774         bnx2x_release_nvram_lock(bp);
10775
10776         return rc;
10777 }
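/*
 * Editorial trace of the flag sequencing above (hypothetical write of 8
 * bytes at dword-aligned offset 0x10, assuming the page is larger than
 * the write):
 *
 *	iteration 1: offset 0x10, cmd_flags = MCPR_NVM_COMMAND_FIRST
 *	iteration 2: offset 0x14, written_so_far == buf_size - 4,
 *	             so cmd_flags = MCPR_NVM_COMMAND_LAST
 *
 * Writes that cross a page boundary additionally get LAST on the final
 * dword of one page and FIRST on the first dword of the next.
 */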
10778
10779 static int bnx2x_set_eeprom(struct net_device *dev,
10780                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10781 {
10782         struct bnx2x *bp = netdev_priv(dev);
10783         int port = BP_PORT(bp);
10784         int rc = 0;
10785
10786         if (!netif_running(dev))
10787                 return -EAGAIN;
10788
10789         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10790            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10791            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10792            eeprom->len, eeprom->len);
10793
10794         /* parameters already validated in ethtool_set_eeprom */
10795
10796         /* PHY eeprom can be accessed only by the PMF */
10797         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10798             !bp->port.pmf)
10799                 return -EINVAL;
10800
10801         if (eeprom->magic == 0x50485950) {
10802                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10803                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10804
10805                 bnx2x_acquire_phy_lock(bp);
10806                 rc |= bnx2x_link_reset(&bp->link_params,
10807                                        &bp->link_vars, 0);
10808                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10809                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10810                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10811                                        MISC_REGISTERS_GPIO_HIGH, port);
10812                 bnx2x_release_phy_lock(bp);
10813                 bnx2x_link_report(bp);
10814
10815         } else if (eeprom->magic == 0x50485952) {
10816                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10817                 if (bp->state == BNX2X_STATE_OPEN) {
10818                         bnx2x_acquire_phy_lock(bp);
10819                         rc |= bnx2x_link_reset(&bp->link_params,
10820                                                &bp->link_vars, 1);
10821
10822                         rc |= bnx2x_phy_init(&bp->link_params,
10823                                              &bp->link_vars);
10824                         bnx2x_release_phy_lock(bp);
10825                         bnx2x_calc_fc_adv(bp);
10826                 }
10827         } else if (eeprom->magic == 0x53985943) {
10828                 /* 'PHYC' (0x53985943; ASCII 'PHYC' would be 0x50485943): PHY FW upgrade completed */
10829                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10830                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10831                         u8 ext_phy_addr =
10832                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10833
10834                         /* DSP Remove Download Mode */
10835                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10836                                        MISC_REGISTERS_GPIO_LOW, port);
10837
10838                         bnx2x_acquire_phy_lock(bp);
10839
10840                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10841
10842                         /* wait 0.5 sec to allow it to run */
10843                         msleep(500);
10844                         bnx2x_ext_phy_hw_reset(bp, port);
10845                         msleep(500);
10846                         bnx2x_release_phy_lock(bp);
10847                 }
10848         } else
10849                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10850
10851         return rc;
10852 }
10853
10854 static int bnx2x_get_coalesce(struct net_device *dev,
10855                               struct ethtool_coalesce *coal)
10856 {
10857         struct bnx2x *bp = netdev_priv(dev);
10858
10859         memset(coal, 0, sizeof(struct ethtool_coalesce));
10860
10861         coal->rx_coalesce_usecs = bp->rx_ticks;
10862         coal->tx_coalesce_usecs = bp->tx_ticks;
10863
10864         return 0;
10865 }
10866
10867 static int bnx2x_set_coalesce(struct net_device *dev,
10868                               struct ethtool_coalesce *coal)
10869 {
10870         struct bnx2x *bp = netdev_priv(dev);
10871
10872         bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10873         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10874                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10875
10876         bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10877         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10878                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10879
10880         if (netif_running(dev))
10881                 bnx2x_update_coalesce(bp);
10882
10883         return 0;
10884 }
10885
10886 static void bnx2x_get_ringparam(struct net_device *dev,
10887                                 struct ethtool_ringparam *ering)
10888 {
10889         struct bnx2x *bp = netdev_priv(dev);
10890
10891         ering->rx_max_pending = MAX_RX_AVAIL;
10892         ering->rx_mini_max_pending = 0;
10893         ering->rx_jumbo_max_pending = 0;
10894
10895         ering->rx_pending = bp->rx_ring_size;
10896         ering->rx_mini_pending = 0;
10897         ering->rx_jumbo_pending = 0;
10898
10899         ering->tx_max_pending = MAX_TX_AVAIL;
10900         ering->tx_pending = bp->tx_ring_size;
10901 }
10902
10903 static int bnx2x_set_ringparam(struct net_device *dev,
10904                                struct ethtool_ringparam *ering)
10905 {
10906         struct bnx2x *bp = netdev_priv(dev);
10907         int rc = 0;
10908
10909         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10910                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10911                 return -EAGAIN;
10912         }
10913
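        /* the TX ring must be able to hold at least one maximally
         * fragmented skb plus a few extra BDs (hence MAX_SKB_FRAGS + 4)
         */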
10914         if ((ering->rx_pending > MAX_RX_AVAIL) ||
10915             (ering->tx_pending > MAX_TX_AVAIL) ||
10916             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10917                 return -EINVAL;
10918
10919         bp->rx_ring_size = ering->rx_pending;
10920         bp->tx_ring_size = ering->tx_pending;
10921
10922         if (netif_running(dev)) {
10923                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10924                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10925         }
10926
10927         return rc;
10928 }
10929
10930 static void bnx2x_get_pauseparam(struct net_device *dev,
10931                                  struct ethtool_pauseparam *epause)
10932 {
10933         struct bnx2x *bp = netdev_priv(dev);
10934
10935         epause->autoneg = (bp->link_params.req_flow_ctrl ==
10936                            BNX2X_FLOW_CTRL_AUTO) &&
10937                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10938
10939         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10940                             BNX2X_FLOW_CTRL_RX);
10941         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10942                             BNX2X_FLOW_CTRL_TX);
10943
10944         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10945            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10946            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10947 }
10948
10949 static int bnx2x_set_pauseparam(struct net_device *dev,
10950                                 struct ethtool_pauseparam *epause)
10951 {
10952         struct bnx2x *bp = netdev_priv(dev);
10953
10954         if (IS_E1HMF(bp))
10955                 return 0;
10956
10957         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10958            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10959            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10960
10961         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10962
10963         if (epause->rx_pause)
10964                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10965
10966         if (epause->tx_pause)
10967                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10968
10969         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10970                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10971
10972         if (epause->autoneg) {
10973                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10974                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10975                         return -EINVAL;
10976                 }
10977
10978                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10979                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10980         }
10981
10982         DP(NETIF_MSG_LINK,
10983            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10984
10985         if (netif_running(dev)) {
10986                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10987                 bnx2x_link_set(bp);
10988         }
10989
10990         return 0;
10991 }
10992
10993 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10994 {
10995         struct bnx2x *bp = netdev_priv(dev);
10996         int changed = 0;
10997         int rc = 0;
10998
10999         if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
11000                 return -EINVAL;
11001
11002         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11003                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11004                 return -EAGAIN;
11005         }
11006
11007         /* TPA requires Rx CSUM offloading */
11008         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
11009                 if (!disable_tpa) {
11010                         if (!(dev->features & NETIF_F_LRO)) {
11011                                 dev->features |= NETIF_F_LRO;
11012                                 bp->flags |= TPA_ENABLE_FLAG;
11013                                 changed = 1;
11014                         }
11015                 } else
11016                         rc = -EINVAL;
11017         } else if (dev->features & NETIF_F_LRO) {
11018                 dev->features &= ~NETIF_F_LRO;
11019                 bp->flags &= ~TPA_ENABLE_FLAG;
11020                 changed = 1;
11021         }
11022
11023         if (data & ETH_FLAG_RXHASH)
11024                 dev->features |= NETIF_F_RXHASH;
11025         else
11026                 dev->features &= ~NETIF_F_RXHASH;
11027
11028         if (changed && netif_running(dev)) {
11029                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11030                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11031         }
11032
11033         return rc;
11034 }
11035
11036 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11037 {
11038         struct bnx2x *bp = netdev_priv(dev);
11039
11040         return bp->rx_csum;
11041 }
11042
11043 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11044 {
11045         struct bnx2x *bp = netdev_priv(dev);
11046         int rc = 0;
11047
11048         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11049                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11050                 return -EAGAIN;
11051         }
11052
11053         bp->rx_csum = data;
11054
11055         /* Disable TPA when Rx CSUM is disabled; otherwise all
11056            TPA'ed packets will be discarded due to a wrong TCP CSUM */
11057         if (!data) {
11058                 u32 flags = ethtool_op_get_flags(dev);
11059
11060                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11061         }
11062
11063         return rc;
11064 }
11065
11066 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11067 {
11068         if (data) {
11069                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11070                 dev->features |= NETIF_F_TSO6;
11071         } else {
11072                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11073                 dev->features &= ~NETIF_F_TSO6;
11074         }
11075
11076         return 0;
11077 }
11078
11079 static const struct {
11080         char string[ETH_GSTRING_LEN];
11081 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11082         { "register_test (offline)" },
11083         { "memory_test (offline)" },
11084         { "loopback_test (offline)" },
11085         { "nvram_test (online)" },
11086         { "interrupt_test (online)" },
11087         { "link_test (online)" },
11088         { "idle check (online)" }
11089 };
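
/* The array order matches the buf[] slots filled in bnx2x_self_test():
 * 0 registers, 1 memory, 2 loopback, 3 nvram, 4 interrupt, 5 link.
 */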
11090
11091 static int bnx2x_test_registers(struct bnx2x *bp)
11092 {
11093         int idx, i, rc = -ENODEV;
11094         u32 wr_val = 0;
11095         int port = BP_PORT(bp);
11096         static const struct {
11097                 u32 offset0;
11098                 u32 offset1;
11099                 u32 mask;
11100         } reg_tbl[] = {
11101 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
11102                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
11103                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
11104                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
11105                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
11106                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
11107                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
11108                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
11109                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
11110                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
11111 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
11112                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
11113                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
11114                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
11115                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
11116                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11117                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
11118                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
11119                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
11120                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
11121 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
11122                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
11123                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
11124                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
11125                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
11126                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
11127                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
11128                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
11129                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
11130                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
11131 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
11132                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
11133                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
11134                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11135                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
11136                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11137                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
11138
11139                 { 0xffffffff, 0, 0x00000000 }
11140         };
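        /* offset1 is the per-port register stride: the address tested is
         * offset0 + port * offset1, so one table covers both ports
         */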
11141
11142         if (!netif_running(bp->dev))
11143                 return rc;
11144
11145         /* Run the test twice:
11146            first writing 0x00000000, then writing 0xffffffff */
11147         for (idx = 0; idx < 2; idx++) {
11148
11149                 switch (idx) {
11150                 case 0:
11151                         wr_val = 0;
11152                         break;
11153                 case 1:
11154                         wr_val = 0xffffffff;
11155                         break;
11156                 }
11157
11158                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11159                         u32 offset, mask, save_val, val;
11160
11161                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11162                         mask = reg_tbl[i].mask;
11163
11164                         save_val = REG_RD(bp, offset);
11165
11166                         REG_WR(bp, offset, (wr_val & mask));
11167                         val = REG_RD(bp, offset);
11168
11169                         /* Restore the original register's value */
11170                         REG_WR(bp, offset, save_val);
11171
11172                         /* verify value is as expected */
11173                         if ((val & mask) != (wr_val & mask)) {
11174                                 DP(NETIF_MSG_PROBE,
11175                                    "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11176                                    offset, val, wr_val, mask);
11177                                 goto test_reg_exit;
11178                         }
11179                 }
11180         }
11181
11182         rc = 0;
11183
11184 test_reg_exit:
11185         return rc;
11186 }
11187
11188 static int bnx2x_test_memory(struct bnx2x *bp)
11189 {
11190         int i, j, rc = -ENODEV;
11191         u32 val;
11192         static const struct {
11193                 u32 offset;
11194                 int size;
11195         } mem_tbl[] = {
11196                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
11197                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11198                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
11199                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
11200                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
11201                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
11202                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
11203
11204                 { 0xffffffff, 0 }
11205         };
11206         static const struct {
11207                 char *name;
11208                 u32 offset;
11209                 u32 e1_mask;
11210                 u32 e1h_mask;
11211         } prty_tbl[] = {
11212                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
11213                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
11214                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
11215                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
11216                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
11217                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
11218
11219                 { NULL, 0xffffffff, 0, 0 }
11220         };
11221
11222         if (!netif_running(bp->dev))
11223                 return rc;
11224
11225         /* Go through all the memories */
11226         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11227                 for (j = 0; j < mem_tbl[i].size; j++)
11228                         REG_RD(bp, mem_tbl[i].offset + j*4);
11229
11230         /* Check the parity status */
11231         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11232                 val = REG_RD(bp, prty_tbl[i].offset);
11233                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11234                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11235                         DP(NETIF_MSG_HW,
11236                            "%s is 0x%x\n", prty_tbl[i].name, val);
11237                         goto test_mem_exit;
11238                 }
11239         }
11240
11241         rc = 0;
11242
11243 test_mem_exit:
11244         return rc;
11245 }
11246
11247 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11248 {
11249         int cnt = 1000;
11250
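        /* if the link was up before the test, poll for up to ~10 seconds
         * (1000 x 10ms) for it to come back
         */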
11251         if (link_up)
11252                 while (bnx2x_link_test(bp) && cnt--)
11253                         msleep(10);
11254 }
11255
11256 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11257 {
11258         unsigned int pkt_size, num_pkts, i;
11259         struct sk_buff *skb;
11260         unsigned char *packet;
11261         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11262         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11263         u16 tx_start_idx, tx_idx;
11264         u16 rx_start_idx, rx_idx;
11265         u16 pkt_prod, bd_prod;
11266         struct sw_tx_bd *tx_buf;
11267         struct eth_tx_start_bd *tx_start_bd;
11268         struct eth_tx_parse_bd *pbd = NULL;
11269         dma_addr_t mapping;
11270         union eth_rx_cqe *cqe;
11271         u8 cqe_fp_flags;
11272         struct sw_rx_bd *rx_buf;
11273         u16 len;
11274         int rc = -ENODEV;
11275
11276         /* check the loopback mode */
11277         switch (loopback_mode) {
11278         case BNX2X_PHY_LOOPBACK:
11279                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11280                         return -EINVAL;
11281                 break;
11282         case BNX2X_MAC_LOOPBACK:
11283                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11284                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11285                 break;
11286         default:
11287                 return -EINVAL;
11288         }
11289
11290         /* prepare the loopback packet */
11291         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11292                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11293         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11294         if (!skb) {
11295                 rc = -ENOMEM;
11296                 goto test_loopback_exit;
11297         }
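        /* build a self-addressed frame: DA = our own MAC, SA = 0, the
         * rest of the header is 0x77 and the payload is an incrementing
         * byte pattern that the receive side verifies below
         */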
11298         packet = skb_put(skb, pkt_size);
11299         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11300         memset(packet + ETH_ALEN, 0, ETH_ALEN);
11301         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11302         for (i = ETH_HLEN; i < pkt_size; i++)
11303                 packet[i] = (unsigned char) (i & 0xff);
11304
11305         /* send the loopback packet */
11306         num_pkts = 0;
11307         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11308         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11309
11310         pkt_prod = fp_tx->tx_pkt_prod++;
11311         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11312         tx_buf->first_bd = fp_tx->tx_bd_prod;
11313         tx_buf->skb = skb;
11314         tx_buf->flags = 0;
11315
11316         bd_prod = TX_BD(fp_tx->tx_bd_prod);
11317         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11318         mapping = dma_map_single(&bp->pdev->dev, skb->data,
11319                                  skb_headlen(skb), DMA_TO_DEVICE);
11320         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11321         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11322         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11323         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11324         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11325         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11326         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11327                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11328
11329         /* turn on parsing and get a BD */
11330         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11331         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11332
11333         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11334
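        /* make sure the BDs are fully written before ringing the doorbell;
         * the barriers order the descriptor writes against the producer
         * update and the MMIO doorbell write
         */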
11335         wmb();
11336
11337         fp_tx->tx_db.data.prod += 2;
11338         barrier();
11339         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11340
11341         mmiowb();
11342
11343         num_pkts++;
11344         fp_tx->tx_bd_prod += 2; /* start + pbd */
11345
11346         udelay(100);
11347
11348         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11349         if (tx_idx != tx_start_idx + num_pkts)
11350                 goto test_loopback_exit;
11351
11352         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11353         if (rx_idx != rx_start_idx + num_pkts)
11354                 goto test_loopback_exit;
11355
11356         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11357         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
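        /* "FALGS" (sic) is the flag name as defined in the HSI header */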
11358         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11359                 goto test_loopback_rx_exit;
11360
11361         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11362         if (len != pkt_size)
11363                 goto test_loopback_rx_exit;
11364
11365         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11366         skb = rx_buf->skb;
11367         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11368         for (i = ETH_HLEN; i < pkt_size; i++)
11369                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11370                         goto test_loopback_rx_exit;
11371
11372         rc = 0;
11373
11374 test_loopback_rx_exit:
11375
11376         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11377         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11378         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11379         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11380
11381         /* Update producers */
11382         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11383                              fp_rx->rx_sge_prod);
11384
11385 test_loopback_exit:
11386         bp->link_params.loopback_mode = LOOPBACK_NONE;
11387
11388         return rc;
11389 }
11390
11391 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11392 {
11393         int rc = 0, res;
11394
11395         if (BP_NOMCP(bp))
11396                 return rc;
11397
11398         if (!netif_running(bp->dev))
11399                 return BNX2X_LOOPBACK_FAILED;
11400
11401         bnx2x_netif_stop(bp, 1);
11402         bnx2x_acquire_phy_lock(bp);
11403
11404         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11405         if (res) {
11406                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
11407                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11408         }
11409
11410         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11411         if (res) {
11412                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
11413                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11414         }
11415
11416         bnx2x_release_phy_lock(bp);
11417         bnx2x_netif_start(bp);
11418
11419         return rc;
11420 }
11421
11422 #define CRC32_RESIDUAL                  0xdebb20e3
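/* CRC32 computed over a block that ends with its own little-endian CRC
 * always yields this fixed residual, so each NVRAM region below can be
 * validated without locating its embedded CRC field.
 */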
11423
11424 static int bnx2x_test_nvram(struct bnx2x *bp)
11425 {
11426         static const struct {
11427                 int offset;
11428                 int size;
11429         } nvram_tbl[] = {
11430                 {     0,  0x14 }, /* bootstrap */
11431                 {  0x14,  0xec }, /* dir */
11432                 { 0x100, 0x350 }, /* manuf_info */
11433                 { 0x450,  0xf0 }, /* feature_info */
11434                 { 0x640,  0x64 }, /* upgrade_key_info */
11435                 { 0x6a4,  0x64 },
11436                 { 0x708,  0x70 }, /* manuf_key_info */
11437                 { 0x778,  0x70 },
11438                 {     0,     0 }
11439         };
11440         __be32 buf[0x350 / 4];
11441         u8 *data = (u8 *)buf;
11442         int i, rc;
11443         u32 magic, crc;
11444
11445         if (BP_NOMCP(bp))
11446                 return 0;
11447
11448         rc = bnx2x_nvram_read(bp, 0, data, 4);
11449         if (rc) {
11450                 DP(NETIF_MSG_PROBE, "magic value read failed (rc %d)\n", rc);
11451                 goto test_nvram_exit;
11452         }
11453
11454         magic = be32_to_cpu(buf[0]);
11455         if (magic != 0x669955aa) {
11456                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11457                 rc = -ENODEV;
11458                 goto test_nvram_exit;
11459         }
11460
11461         for (i = 0; nvram_tbl[i].size; i++) {
11462
11463                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11464                                       nvram_tbl[i].size);
11465                 if (rc) {
11466                         DP(NETIF_MSG_PROBE,
11467                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11468                         goto test_nvram_exit;
11469                 }
11470
11471                 crc = ether_crc_le(nvram_tbl[i].size, data);
11472                 if (crc != CRC32_RESIDUAL) {
11473                         DP(NETIF_MSG_PROBE,
11474                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11475                         rc = -ENODEV;
11476                         goto test_nvram_exit;
11477                 }
11478         }
11479
11480 test_nvram_exit:
11481         return rc;
11482 }
11483
11484 static int bnx2x_test_intr(struct bnx2x *bp)
11485 {
11486         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11487         int i, rc;
11488
11489         if (!netif_running(bp->dev))
11490                 return -ENODEV;
11491
11492         config->hdr.length = 0;
11493         if (CHIP_IS_E1(bp))
11494                 /* use last unicast entries */
11495                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11496         else
11497                 config->hdr.offset = BP_FUNC(bp);
11498         config->hdr.client_id = bp->fp->cl_id;
11499         config->hdr.reserved1 = 0;
11500
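        /* post a benign SET_MAC ramrod and poll for its completion; if the
         * slowpath interrupt path is broken, set_mac_pending never clears
         * and the test fails after ~100ms
         */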
11501         bp->set_mac_pending++;
11502         smp_wmb();
11503         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11504                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11505                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11506         if (rc == 0) {
11507                 for (i = 0; i < 10; i++) {
11508                         if (!bp->set_mac_pending)
11509                                 break;
11510                         smp_rmb();
11511                         msleep_interruptible(10);
11512                 }
11513                 if (i == 10)
11514                         rc = -ENODEV;
11515         }
11516
11517         return rc;
11518 }
11519
11520 static void bnx2x_self_test(struct net_device *dev,
11521                             struct ethtool_test *etest, u64 *buf)
11522 {
11523         struct bnx2x *bp = netdev_priv(dev);
11524
11525         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11526                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11527                 etest->flags |= ETH_TEST_FL_FAILED;
11528                 return;
11529         }
11530
11531         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11532
11533         if (!netif_running(dev))
11534                 return;
11535
11536         /* offline tests are not supported in MF mode */
11537         if (IS_E1HMF(bp))
11538                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11539
11540         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11541                 int port = BP_PORT(bp);
11542                 u32 val;
11543                 u8 link_up;
11544
11545                 /* save current value of input enable for TX port IF */
11546                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11547                 /* disable input for TX port IF */
11548                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11549
11550                 link_up = (bnx2x_link_test(bp) == 0);
11551                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11552                 bnx2x_nic_load(bp, LOAD_DIAG);
11553                 /* wait until link state is restored */
11554                 bnx2x_wait_for_link(bp, link_up);
11555
11556                 if (bnx2x_test_registers(bp) != 0) {
11557                         buf[0] = 1;
11558                         etest->flags |= ETH_TEST_FL_FAILED;
11559                 }
11560                 if (bnx2x_test_memory(bp) != 0) {
11561                         buf[1] = 1;
11562                         etest->flags |= ETH_TEST_FL_FAILED;
11563                 }
11564                 buf[2] = bnx2x_test_loopback(bp, link_up);
11565                 if (buf[2] != 0)
11566                         etest->flags |= ETH_TEST_FL_FAILED;
11567
11568                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11569
11570                 /* restore input for TX port IF */
11571                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11572
11573                 bnx2x_nic_load(bp, LOAD_NORMAL);
11574                 /* wait until link state is restored */
11575                 bnx2x_wait_for_link(bp, link_up);
11576         }
11577         if (bnx2x_test_nvram(bp) != 0) {
11578                 buf[3] = 1;
11579                 etest->flags |= ETH_TEST_FL_FAILED;
11580         }
11581         if (bnx2x_test_intr(bp) != 0) {
11582                 buf[4] = 1;
11583                 etest->flags |= ETH_TEST_FL_FAILED;
11584         }
11585         if (bp->port.pmf)
11586                 if (bnx2x_link_test(bp) != 0) {
11587                         buf[5] = 1;
11588                         etest->flags |= ETH_TEST_FL_FAILED;
11589                 }
11590
11591 #ifdef BNX2X_EXTRA_DEBUG
11592         bnx2x_panic_dump(bp);
11593 #endif
11594 }
11595
11596 static const struct {
11597         long offset;
11598         int size;
11599         u8 string[ETH_GSTRING_LEN];
11600 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11601 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11602         { Q_STATS_OFFSET32(error_bytes_received_hi),
11603                                                 8, "[%d]: rx_error_bytes" },
11604         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11605                                                 8, "[%d]: rx_ucast_packets" },
11606         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11607                                                 8, "[%d]: rx_mcast_packets" },
11608         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11609                                                 8, "[%d]: rx_bcast_packets" },
11610         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11611         { Q_STATS_OFFSET32(rx_err_discard_pkt),
11612                                          4, "[%d]: rx_phy_ip_err_discards"},
11613         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11614                                          4, "[%d]: rx_skb_alloc_discard" },
11615         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11616
11617 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11618         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11619                                                 8, "[%d]: tx_ucast_packets" },
11620         { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11621                                                 8, "[%d]: tx_mcast_packets" },
11622         { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11623                                                 8, "[%d]: tx_bcast_packets" }
11624 };
11625
11626 static const struct {
11627         long offset;
11628         int size;
11629         u32 flags;
11630 #define STATS_FLAGS_PORT                1
11631 #define STATS_FLAGS_FUNC                2
11632 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11633         u8 string[ETH_GSTRING_LEN];
11634 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11635 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11636                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
11637         { STATS_OFFSET32(error_bytes_received_hi),
11638                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11639         { STATS_OFFSET32(total_unicast_packets_received_hi),
11640                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11641         { STATS_OFFSET32(total_multicast_packets_received_hi),
11642                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11643         { STATS_OFFSET32(total_broadcast_packets_received_hi),
11644                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11645         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11646                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11647         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11648                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
11649         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11650                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11651         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11652                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11653 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11654                                 8, STATS_FLAGS_PORT, "rx_fragments" },
11655         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11656                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
11657         { STATS_OFFSET32(no_buff_discard_hi),
11658                                 8, STATS_FLAGS_BOTH, "rx_discards" },
11659         { STATS_OFFSET32(mac_filter_discard),
11660                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11661         { STATS_OFFSET32(xxoverflow_discard),
11662                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11663         { STATS_OFFSET32(brb_drop_hi),
11664                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11665         { STATS_OFFSET32(brb_truncate_hi),
11666                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11667         { STATS_OFFSET32(pause_frames_received_hi),
11668                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11669         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11670                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11671         { STATS_OFFSET32(nig_timer_max),
11672                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11673 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11674                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11675         { STATS_OFFSET32(rx_skb_alloc_failed),
11676                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11677         { STATS_OFFSET32(hw_csum_err),
11678                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11679
11680         { STATS_OFFSET32(total_bytes_transmitted_hi),
11681                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
11682         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11683                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11684         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11685                                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11686         { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11687                                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11688         { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11689                                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11690         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11691                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11692         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11693                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11694 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11695                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11696         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11697                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11698         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11699                                 8, STATS_FLAGS_PORT, "tx_deferred" },
11700         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11701                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11702         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11703                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11704         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11705                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11706         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11707                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11708         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11709                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11710         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11711                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11712         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11713                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11714 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11715                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11716         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11717                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11718         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11719                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11720         { STATS_OFFSET32(pause_frames_sent_hi),
11721                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11722 };
11723
11724 #define IS_PORT_STAT(i) \
11725         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11726 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11727 #define IS_E1HMF_MODE_STAT(bp) \
11728                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11729
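/* Port-wide MAC statistics are suppressed in E1H multi-function mode,
 * where the port is shared between functions; setting BNX2X_MSG_STATS
 * in msg_enable overrides this and reports the full set.
 */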
11730 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11731 {
11732         struct bnx2x *bp = netdev_priv(dev);
11733         int i, num_stats;
11734
11735         switch (stringset) {
11736         case ETH_SS_STATS:
11737                 if (is_multi(bp)) {
11738                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11739                         if (!IS_E1HMF_MODE_STAT(bp))
11740                                 num_stats += BNX2X_NUM_STATS;
11741                 } else {
11742                         if (IS_E1HMF_MODE_STAT(bp)) {
11743                                 num_stats = 0;
11744                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
11745                                         if (IS_FUNC_STAT(i))
11746                                                 num_stats++;
11747                         } else
11748                                 num_stats = BNX2X_NUM_STATS;
11749                 }
11750                 return num_stats;
11751
11752         case ETH_SS_TEST:
11753                 return BNX2X_NUM_TESTS;
11754
11755         default:
11756                 return -EINVAL;
11757         }
11758 }
11759
11760 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11761 {
11762         struct bnx2x *bp = netdev_priv(dev);
11763         int i, j, k;
11764
11765         switch (stringset) {
11766         case ETH_SS_STATS:
11767                 if (is_multi(bp)) {
11768                         k = 0;
11769                         for_each_queue(bp, i) {
11770                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11771                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11772                                                 bnx2x_q_stats_arr[j].string, i);
11773                                 k += BNX2X_NUM_Q_STATS;
11774                         }
11775                         if (IS_E1HMF_MODE_STAT(bp))
11776                                 break;
11777                         for (j = 0; j < BNX2X_NUM_STATS; j++)
11778                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11779                                        bnx2x_stats_arr[j].string);
11780                 } else {
11781                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11782                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11783                                         continue;
11784                                 strcpy(buf + j*ETH_GSTRING_LEN,
11785                                        bnx2x_stats_arr[i].string);
11786                                 j++;
11787                         }
11788                 }
11789                 break;
11790
11791         case ETH_SS_TEST:
11792                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11793                 break;
11794         }
11795 }
11796
11797 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11798                                     struct ethtool_stats *stats, u64 *buf)
11799 {
11800         struct bnx2x *bp = netdev_priv(dev);
11801         u32 *hw_stats, *offset;
11802         int i, j, k;
11803
11804         if (is_multi(bp)) {
11805                 k = 0;
11806                 for_each_queue(bp, i) {
11807                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11808                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11809                                 if (bnx2x_q_stats_arr[j].size == 0) {
11810                                         /* skip this counter */
11811                                         buf[k + j] = 0;
11812                                         continue;
11813                                 }
11814                                 offset = (hw_stats +
11815                                           bnx2x_q_stats_arr[j].offset);
11816                                 if (bnx2x_q_stats_arr[j].size == 4) {
11817                                         /* 4-byte counter */
11818                                         buf[k + j] = (u64) *offset;
11819                                         continue;
11820                                 }
11821                                 /* 8-byte counter */
11822                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11823                         }
11824                         k += BNX2X_NUM_Q_STATS;
11825                 }
11826                 if (IS_E1HMF_MODE_STAT(bp))
11827                         return;
11828                 hw_stats = (u32 *)&bp->eth_stats;
11829                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11830                         if (bnx2x_stats_arr[j].size == 0) {
11831                                 /* skip this counter */
11832                                 buf[k + j] = 0;
11833                                 continue;
11834                         }
11835                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
11836                         if (bnx2x_stats_arr[j].size == 4) {
11837                                 /* 4-byte counter */
11838                                 buf[k + j] = (u64) *offset;
11839                                 continue;
11840                         }
11841                         /* 8-byte counter */
11842                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
11843                 }
11844         } else {
11845                 hw_stats = (u32 *)&bp->eth_stats;
11846                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11847                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11848                                 continue;
11849                         if (bnx2x_stats_arr[i].size == 0) {
11850                                 /* skip this counter */
11851                                 buf[j] = 0;
11852                                 j++;
11853                                 continue;
11854                         }
11855                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
11856                         if (bnx2x_stats_arr[i].size == 4) {
11857                                 /* 4-byte counter */
11858                                 buf[j] = (u64) *offset;
11859                                 j++;
11860                                 continue;
11861                         }
11862                         /* 8-byte counter */
11863                         buf[j] = HILO_U64(*offset, *(offset + 1));
11864                         j++;
11865                 }
11866         }
11867 }
11868
11869 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11870 {
11871         struct bnx2x *bp = netdev_priv(dev);
11872         int i;
11873
11874         if (!netif_running(dev))
11875                 return 0;
11876
11877         if (!bp->port.pmf)
11878                 return 0;
11879
11880         if (data == 0)
11881                 data = 2;
11882
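        /* blink for 'data' seconds, toggling the LED every 500ms */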
11883         for (i = 0; i < (data * 2); i++) {
11884                 if ((i % 2) == 0)
11885                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11886                                       SPEED_1000);
11887                 else
11888                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11889
11890                 msleep_interruptible(500);
11891                 if (signal_pending(current))
11892                         break;
11893         }
11894
11895         if (bp->link_vars.link_up)
11896                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11897                               bp->link_vars.line_speed);
11898
11899         return 0;
11900 }
11901
11902 static const struct ethtool_ops bnx2x_ethtool_ops = {
11903         .get_settings           = bnx2x_get_settings,
11904         .set_settings           = bnx2x_set_settings,
11905         .get_drvinfo            = bnx2x_get_drvinfo,
11906         .get_regs_len           = bnx2x_get_regs_len,
11907         .get_regs               = bnx2x_get_regs,
11908         .get_wol                = bnx2x_get_wol,
11909         .set_wol                = bnx2x_set_wol,
11910         .get_msglevel           = bnx2x_get_msglevel,
11911         .set_msglevel           = bnx2x_set_msglevel,
11912         .nway_reset             = bnx2x_nway_reset,
11913         .get_link               = bnx2x_get_link,
11914         .get_eeprom_len         = bnx2x_get_eeprom_len,
11915         .get_eeprom             = bnx2x_get_eeprom,
11916         .set_eeprom             = bnx2x_set_eeprom,
11917         .get_coalesce           = bnx2x_get_coalesce,
11918         .set_coalesce           = bnx2x_set_coalesce,
11919         .get_ringparam          = bnx2x_get_ringparam,
11920         .set_ringparam          = bnx2x_set_ringparam,
11921         .get_pauseparam         = bnx2x_get_pauseparam,
11922         .set_pauseparam         = bnx2x_set_pauseparam,
11923         .get_rx_csum            = bnx2x_get_rx_csum,
11924         .set_rx_csum            = bnx2x_set_rx_csum,
11925         .get_tx_csum            = ethtool_op_get_tx_csum,
11926         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
11927         .set_flags              = bnx2x_set_flags,
11928         .get_flags              = ethtool_op_get_flags,
11929         .get_sg                 = ethtool_op_get_sg,
11930         .set_sg                 = ethtool_op_set_sg,
11931         .get_tso                = ethtool_op_get_tso,
11932         .set_tso                = bnx2x_set_tso,
11933         .self_test              = bnx2x_self_test,
11934         .get_sset_count         = bnx2x_get_sset_count,
11935         .get_strings            = bnx2x_get_strings,
11936         .phys_id                = bnx2x_phys_id,
11937         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
11938 };
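
/* Typical userspace entry points into the ops above (the interface name
 * is just an example):
 *
 *   ethtool -t eth0 offline     ->  bnx2x_self_test()
 *   ethtool -S eth0             ->  bnx2x_get_ethtool_stats()
 *   ethtool -p eth0 5           ->  bnx2x_phys_id()
 */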
11939
11940 /* end of ethtool_ops */
11941
11942 /****************************************************************************
11943 * General service functions
11944 ****************************************************************************/
11945
11946 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11947 {
11948         u16 pmcsr;
11949
11950         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11951
11952         switch (state) {
11953         case PCI_D0:
11954                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11955                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11956                                        PCI_PM_CTRL_PME_STATUS));
11957
11958                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11959                         /* delay required during transition out of D3hot */
11960                         msleep(20);
11961                 /* If there are other clients above us, don't
11962                    shut down the power */
11963         case PCI_D3hot:
11964                 /* If there are other clients above don't
11965                    shut down the power */
11966                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11967                         return 0;
11968                 /* Don't shut down the power for emulation and FPGA */
11969                 if (CHIP_REV_IS_SLOW(bp))
11970                         return 0;
11971
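                /* program D3hot (PM state field = 3) */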
11972                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11973                 pmcsr |= 3;
11974
11975                 if (bp->wol)
11976                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11977
11978                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11979                                       pmcsr);
11980
11981                 /* No more memory access after this point until
11982                  * the device is brought back to D0.
11983                  */
11984                 break;
11985
11986         default:
11987                 return -EINVAL;
11988         }
11989         return 0;
11990 }
11991
11992 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11993 {
11994         u16 rx_cons_sb;
11995
11996         /* Tell compiler that status block fields can change */
11997         barrier();
11998         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
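        /* the last element of each RCQ page is a next-page pointer, not a
         * real completion, so step over it at page boundaries
         */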
11999         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
12000                 rx_cons_sb++;
12001         return (fp->rx_comp_cons != rx_cons_sb);
12002 }
12003
12004 /*
12005  * net_device service functions
12006  */
12007
12008 static int bnx2x_poll(struct napi_struct *napi, int budget)
12009 {
12010         int work_done = 0;
12011         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
12012                                                  napi);
12013         struct bnx2x *bp = fp->bp;
12014
12015         while (1) {
12016 #ifdef BNX2X_STOP_ON_ERROR
12017                 if (unlikely(bp->panic)) {
12018                         napi_complete(napi);
12019                         return 0;
12020                 }
12021 #endif
12022
12023                 if (bnx2x_has_tx_work(fp))
12024                         bnx2x_tx_int(fp);
12025
12026                 if (bnx2x_has_rx_work(fp)) {
12027                         work_done += bnx2x_rx_int(fp, budget - work_done);
12028
12029                         /* must not complete if we consumed full budget */
12030                         if (work_done >= budget)
12031                                 break;
12032                 }
12033
12034                 /* Fall out from the NAPI loop if needed */
12035                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12036                         bnx2x_update_fpsb_idx(fp);
12037                         /* bnx2x_has_rx_work() reads the status block, thus we need
12038                          * to ensure that status block indices have been actually read
12039                          * (bnx2x_update_fpsb_idx) prior to this check
12040                          * (bnx2x_has_rx_work) so that we won't write the "newer"
12041                          * value of the status block to IGU (if there was a DMA right
12042                          * after bnx2x_has_rx_work and if there is no rmb, the memory
12043                          * reading (bnx2x_update_fpsb_idx) may be postponed to right
12044                          * before bnx2x_ack_sb). In this case there will never be
12045                          * another interrupt until there is another update of the
12046                          * status block, while there is still unhandled work.
12047                          */
12048                         rmb();
12049
12050                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12051                                 napi_complete(napi);
12052                                 /* Re-enable interrupts */
12053                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12054                                              le16_to_cpu(fp->fp_c_idx),
12055                                              IGU_INT_NOP, 1);
12056                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12057                                              le16_to_cpu(fp->fp_u_idx),
12058                                              IGU_INT_ENABLE, 1);
12059                                 break;
12060                         }
12061                 }
12062         }
12063
12064         return work_done;
12065 }
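/*
 * Editorial sketch (#if 0, not compiled): the poll loop above reduced
 * to its ordering skeleton. The point of the rmb() is that the status
 * block must actually be re-read before the final "any work left?"
 * check; only if that check still says idle may the interrupt be
 * re-enabled. All example_* names are hypothetical stand-ins.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_fp *fp = container_of(napi, struct example_fp, napi);
	int work_done = 0;

	for (;;) {
		work_done += example_rx(fp, budget - work_done);
		if (work_done >= budget)
			break;			/* stay scheduled */

		example_update_sb_idx(fp);	/* read status block indices */
		rmb();				/* order read vs. re-check */
		if (!example_has_work(fp)) {
			napi_complete(napi);
			example_enable_irq(fp);	/* ack SB, IGU_INT_ENABLE */
			break;
		}
	}
	return work_done;
}
#endif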
12066
12067
12068 /* we split the first BD into headers and data BDs
12069  * to ease the pain of our fellow microcode engineers
12070  * we use one mapping for both BDs
12071  * So far this has only been observed to happen
12072  * in Other Operating Systems(TM)
12073  */
12074 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12075                                    struct bnx2x_fastpath *fp,
12076                                    struct sw_tx_bd *tx_buf,
12077                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
12078                                    u16 bd_prod, int nbd)
12079 {
12080         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12081         struct eth_tx_bd *d_tx_bd;
12082         dma_addr_t mapping;
12083         int old_len = le16_to_cpu(h_tx_bd->nbytes);
12084
12085         /* first fix first BD */
12086         h_tx_bd->nbd = cpu_to_le16(nbd);
12087         h_tx_bd->nbytes = cpu_to_le16(hlen);
12088
12089         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12090            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12091            h_tx_bd->addr_lo, h_tx_bd->nbd);
12092
12093         /* now get a new data BD
12094          * (after the pbd) and fill it */
12095         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12096         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12097
12098         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12099                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12100
12101         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12102         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12103         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12104
12105         /* this marks the BD as one that has no individual mapping */
12106         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12107
12108         DP(NETIF_MSG_TX_QUEUED,
12109            "TSO split data size is %d (%x:%x)\n",
12110            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12111
12112         /* update tx_bd */
12113         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12114
12115         return bd_prod;
12116 }
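/*
 * Editorial sketch (#if 0, not part of the driver): the split above as
 * plain arithmetic. One DMA mapping serves both BDs - the header BD
 * keeps the first hlen bytes, the data BD points hlen bytes into the
 * same mapping and carries the remainder, so no second
 * dma_map_single() is needed. Types and names below are hypothetical.
 */
#if 0
static void example_split(dma_addr_t map, u16 len, u16 hlen,
			  struct example_bd *hdr, struct example_bd *data)
{
	hdr->addr = map;		/* headers: [map, map + hlen)       */
	hdr->nbytes = hlen;
	data->addr = map + hlen;	/* payload: [map + hlen, map + len) */
	data->nbytes = len - hlen;
}
#endif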
12117
12118 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12119 {
12120         if (fix > 0)
12121                 csum = (u16) ~csum_fold(csum_sub(csum,
12122                                 csum_partial(t_header - fix, fix, 0)));
12123
12124         else if (fix < 0)
12125                 csum = (u16) ~csum_fold(csum_add(csum,
12126                                 csum_partial(t_header, -fix, 0)));
12127
12128         return swab16(csum);
12129 }
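/*
 * Editorial sketch (#if 0, not compiled): what the fixup above
 * computes, as plain RFC 1071/1624 one's-complement arithmetic. If the
 * HW summed 'fix' bytes too many before the transport header, their
 * partial sum is subtracted; if it started 'fix' bytes too late, the
 * missing bytes are added back. The driver additionally folds, inverts
 * and byte-swaps for the FW; this standalone model only shows the
 * adjustment itself.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

/* Folded 16-bit one's-complement sum of n bytes (a stand-in for the
 * kernel's csum_partial() + csum_fold(); words taken big-endian). */
static uint16_t ocsum16(const uint8_t *p, size_t n)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < n; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	if (n & 1)
		sum += (uint32_t)p[n - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Shift the coverage start of 'csum' by 'fix' bytes: in one's
 * complement, subtracting y is adding its complement (~y). */
static uint16_t example_csum_fix(const uint8_t *t_hdr, uint16_t csum,
				 int fix)
{
	uint32_t sum = csum;

	if (fix > 0)
		sum += 0xffffu ^ ocsum16(t_hdr - fix, fix);	/* subtract */
	else if (fix < 0)
		sum += ocsum16(t_hdr, (size_t)-fix);		/* add back */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}
#endif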
12130
12131 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12132 {
12133         u32 rc;
12134
12135         if (skb->ip_summed != CHECKSUM_PARTIAL)
12136                 rc = XMIT_PLAIN;
12137
12138         else {
12139                 if (skb->protocol == htons(ETH_P_IPV6)) {
12140                         rc = XMIT_CSUM_V6;
12141                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12142                                 rc |= XMIT_CSUM_TCP;
12143
12144                 } else {
12145                         rc = XMIT_CSUM_V4;
12146                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12147                                 rc |= XMIT_CSUM_TCP;
12148                 }
12149         }
12150
12151         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12152                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12153
12154         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12155                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12156
12157         return rc;
12158 }
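/*
 * Editorial note (#if 0): an example of the flags composed above. An
 * IPv4 TCP skb with CHECKSUM_PARTIAL yields XMIT_CSUM_V4 |
 * XMIT_CSUM_TCP; if it is also GSO (SKB_GSO_TCPV4), XMIT_GSO_V4 is
 * OR-ed in. A non-offloaded skb is simply XMIT_PLAIN.
 */
#if 0
	/* e.g. an IPv4 TCP skb with CHECKSUM_PARTIAL and SKB_GSO_TCPV4: */
	u32 t = bnx2x_xmit_type(bp, skb);

	BUG_ON(t != (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP));
#endif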
12159
12160 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12161 /* check if the packet requires linearization (packet is too fragmented);
12162    no need to check fragmentation if page size > 8K (there can be no
12163    violation of the FW restrictions) */
12164 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12165                              u32 xmit_type)
12166 {
12167         int to_copy = 0;
12168         int hlen = 0;
12169         int first_bd_sz = 0;
12170
12171         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12172         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12173
12174                 if (xmit_type & XMIT_GSO) {
12175                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12176                         /* Check if LSO packet needs to be copied:
12177                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12178                         int wnd_size = MAX_FETCH_BD - 3;
12179                         /* Number of windows to check */
12180                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12181                         int wnd_idx = 0;
12182                         int frag_idx = 0;
12183                         u32 wnd_sum = 0;
12184
12185                         /* Headers length */
12186                         hlen = (int)(skb_transport_header(skb) - skb->data) +
12187                                 tcp_hdrlen(skb);
12188
12189                         /* Amount of data (w/o headers) in the linear part of the SKB */
12190                         first_bd_sz = skb_headlen(skb) - hlen;
12191
12192                         wnd_sum  = first_bd_sz;
12193
12194                         /* Calculate the first sum - it's special */
12195                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12196                                 wnd_sum +=
12197                                         skb_shinfo(skb)->frags[frag_idx].size;
12198
12199                         /* If there is data in the linear part of the skb - check it */
12200                         if (first_bd_sz > 0) {
12201                                 if (unlikely(wnd_sum < lso_mss)) {
12202                                         to_copy = 1;
12203                                         goto exit_lbl;
12204                                 }
12205
12206                                 wnd_sum -= first_bd_sz;
12207                         }
12208
12209                         /* Others are easier: run through the frag list and
12210                            check all windows */
12211                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12212                                 wnd_sum +=
12213                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12214
12215                                 if (unlikely(wnd_sum < lso_mss)) {
12216                                         to_copy = 1;
12217                                         break;
12218                                 }
12219                                 wnd_sum -=
12220                                         skb_shinfo(skb)->frags[wnd_idx].size;
12221                         }
12222                 } else {
12223                         /* a non-LSO packet that is too fragmented should
12224                            always be linearized */
12225                         to_copy = 1;
12226                 }
12227         }
12228
12229 exit_lbl:
12230         if (unlikely(to_copy))
12231                 DP(NETIF_MSG_TX_QUEUED,
12232                    "Linearization IS REQUIRED for %s packet. "
12233                    "num_frags %d  hlen %d  first_bd_sz %d\n",
12234                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12235                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12236
12237         return to_copy;
12238 }
12239 #endif
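/*
 * Editorial sketch (#if 0, not part of the driver): the FW rule that
 * bnx2x_pkt_req_lin() enforces, over a plain array of fragment sizes.
 * Every window of wnd_size consecutive BDs must carry at least one
 * full MSS of payload; if any window falls short, the skb must be
 * linearized. Hypothetical helper, mirroring the loop structure above.
 */
#if 0
static int example_needs_linearize(const unsigned int *frag_sz, int nfrags,
				   unsigned int first_bd_sz,
				   unsigned int mss, int wnd_size)
{
	unsigned int wnd_sum = first_bd_sz;
	int i;

	/* first window: linear payload plus the first wnd_size - 1 frags */
	for (i = 0; i < wnd_size - 1 && i < nfrags; i++)
		wnd_sum += frag_sz[i];

	if (first_bd_sz > 0) {
		if (wnd_sum < mss)
			return 1;
		wnd_sum -= first_bd_sz;	/* slide past the linear part */
	}

	/* remaining windows: add the entering frag, drop the leaving one */
	for (i = 0; i + wnd_size - 1 < nfrags; i++) {
		wnd_sum += frag_sz[i + wnd_size - 1];
		if (wnd_sum < mss)
			return 1;
		wnd_sum -= frag_sz[i];
	}
	return 0;
}
#endif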
12240
12241 /* called with netif_tx_lock
12242  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12243  * netif_wake_queue()
12244  */
12245 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12246 {
12247         struct bnx2x *bp = netdev_priv(dev);
12248         struct bnx2x_fastpath *fp;
12249         struct netdev_queue *txq;
12250         struct sw_tx_bd *tx_buf;
12251         struct eth_tx_start_bd *tx_start_bd;
12252         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12253         struct eth_tx_parse_bd *pbd = NULL;
12254         u16 pkt_prod, bd_prod;
12255         int nbd, fp_index;
12256         dma_addr_t mapping;
12257         u32 xmit_type = bnx2x_xmit_type(bp, skb);
12258         int i;
12259         u8 hlen = 0;
12260         __le16 pkt_size = 0;
12261         struct ethhdr *eth;
12262         u8 mac_type = UNICAST_ADDRESS;
12263
12264 #ifdef BNX2X_STOP_ON_ERROR
12265         if (unlikely(bp->panic))
12266                 return NETDEV_TX_BUSY;
12267 #endif
12268
12269         fp_index = skb_get_queue_mapping(skb);
12270         txq = netdev_get_tx_queue(dev, fp_index);
12271
12272         fp = &bp->fp[fp_index];
12273
12274         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12275                 fp->eth_q_stats.driver_xoff++;
12276                 netif_tx_stop_queue(txq);
12277                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12278                 return NETDEV_TX_BUSY;
12279         }
12280
12281         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
12282            "  gso type %x  xmit_type %x\n",
12283            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12284            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12285
12286         eth = (struct ethhdr *)skb->data;
12287
12288         /* set flag according to packet type (UNICAST_ADDRESS is the default) */
12289         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12290                 if (is_broadcast_ether_addr(eth->h_dest))
12291                         mac_type = BROADCAST_ADDRESS;
12292                 else
12293                         mac_type = MULTICAST_ADDRESS;
12294         }
12295
12296 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12297         /* First, check if we need to linearize the skb (due to FW
12298            restrictions). No need to check fragmentation if page size > 8K
12299            (there can be no violation of the FW restrictions) */
12300         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12301                 /* Statistics of linearization */
12302                 bp->lin_cnt++;
12303                 if (skb_linearize(skb) != 0) {
12304                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12305                            "silently dropping this SKB\n");
12306                         dev_kfree_skb_any(skb);
12307                         return NETDEV_TX_OK;
12308                 }
12309         }
12310 #endif
12311
12312         /*
12313         Please read carefully. First we use one BD which we mark as the
12314         start BD, then we have a parsing info BD (used for TSO or xsum),
12315         and only then we have the rest of the TSO BDs.
12316         (Don't forget to mark the last one as last,
12317         and to unmap only AFTER you write to the BD ...)
12318         And above all, all pbd sizes are in words - NOT in DWORDS!
12319         */
12320
12321         pkt_prod = fp->tx_pkt_prod++;
12322         bd_prod = TX_BD(fp->tx_bd_prod);
12323
12324         /* get a tx_buf and first BD */
12325         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12326         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12327
12328         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12329         tx_start_bd->general_data =  (mac_type <<
12330                                         ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12331         /* header nbd */
12332         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12333
12334         /* remember the first BD of the packet */
12335         tx_buf->first_bd = fp->tx_bd_prod;
12336         tx_buf->skb = skb;
12337         tx_buf->flags = 0;
12338
12339         DP(NETIF_MSG_TX_QUEUED,
12340            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
12341            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12342
12343 #ifdef BCM_VLAN
12344         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12345             (bp->flags & HW_VLAN_TX_FLAG)) {
12346                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12347                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12348         } else
12349 #endif
12350                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12351
12352         /* turn on parsing and get a BD */
12353         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12354         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12355
12356         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12357
12358         if (xmit_type & XMIT_CSUM) {
12359                 hlen = (skb_network_header(skb) - skb->data) / 2;
12360
12361                 /* for now NS flag is not used in Linux */
12362                 pbd->global_data =
12363                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12364                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12365
12366                 pbd->ip_hlen = (skb_transport_header(skb) -
12367                                 skb_network_header(skb)) / 2;
12368
12369                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12370
12371                 pbd->total_hlen = cpu_to_le16(hlen);
12372                 hlen = hlen*2;
12373
12374                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12375
12376                 if (xmit_type & XMIT_CSUM_V4)
12377                         tx_start_bd->bd_flags.as_bitfield |=
12378                                                 ETH_TX_BD_FLAGS_IP_CSUM;
12379                 else
12380                         tx_start_bd->bd_flags.as_bitfield |=
12381                                                 ETH_TX_BD_FLAGS_IPV6;
12382
12383                 if (xmit_type & XMIT_CSUM_TCP) {
12384                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12385
12386                 } else {
12387                         s8 fix = SKB_CS_OFF(skb); /* signed! */
12388
12389                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12390
12391                         DP(NETIF_MSG_TX_QUEUED,
12392                            "hlen %d  fix %d  csum before fix %x\n",
12393                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12394
12395                         /* HW bug: fixup the CSUM */
12396                         pbd->tcp_pseudo_csum =
12397                                 bnx2x_csum_fix(skb_transport_header(skb),
12398                                                SKB_CS(skb), fix);
12399
12400                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12401                            pbd->tcp_pseudo_csum);
12402                 }
12403         }
12404
12405         mapping = dma_map_single(&bp->pdev->dev, skb->data,
12406                                  skb_headlen(skb), DMA_TO_DEVICE);
12407
12408         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12409         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12410         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12411         tx_start_bd->nbd = cpu_to_le16(nbd);
12412         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12413         pkt_size = tx_start_bd->nbytes;
12414
12415         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
12416            "  nbytes %d  flags %x  vlan %x\n",
12417            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12418            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12419            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12420
12421         if (xmit_type & XMIT_GSO) {
12422
12423                 DP(NETIF_MSG_TX_QUEUED,
12424                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
12425                    skb->len, hlen, skb_headlen(skb),
12426                    skb_shinfo(skb)->gso_size);
12427
12428                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12429
12430                 if (unlikely(skb_headlen(skb) > hlen))
12431                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12432                                                  hlen, bd_prod, ++nbd);
12433
12434                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12435                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12436                 pbd->tcp_flags = pbd_tcp_flags(skb);
12437
12438                 if (xmit_type & XMIT_GSO_V4) {
12439                         pbd->ip_id = swab16(ip_hdr(skb)->id);
12440                         pbd->tcp_pseudo_csum =
12441                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12442                                                           ip_hdr(skb)->daddr,
12443                                                           0, IPPROTO_TCP, 0));
12444
12445                 } else
12446                         pbd->tcp_pseudo_csum =
12447                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12448                                                         &ipv6_hdr(skb)->daddr,
12449                                                         0, IPPROTO_TCP, 0));
12450
12451                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12452         }
12453         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12454
12455         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12456                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12457
12458                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12459                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12460                 if (total_pkt_bd == NULL)
12461                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12462
12463                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12464                                        frag->page_offset,
12465                                        frag->size, DMA_TO_DEVICE);
12466
12467                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12468                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12469                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12470                 le16_add_cpu(&pkt_size, frag->size);
12471
12472                 DP(NETIF_MSG_TX_QUEUED,
12473                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
12474                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12475                    le16_to_cpu(tx_data_bd->nbytes));
12476         }
12477
12478         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12479
12480         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12481
12482         /* now ring the tx doorbell, counting in nbd the "next page" BD
12483          * if the packet contains or ends with it
12484          */
12485         if (TX_BD_POFF(bd_prod) < nbd)
12486                 nbd++;
12487
12488         if (total_pkt_bd != NULL)
12489                 total_pkt_bd->total_pkt_bytes = pkt_size;
12490
12491         if (pbd)
12492                 DP(NETIF_MSG_TX_QUEUED,
12493                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
12494                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
12495                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12496                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12497                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12498
12499         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
12500
12501         /*
12502          * Make sure that the BD data is updated before updating the producer
12503          * since FW might read the BD right after the producer is updated.
12504          * This is only applicable for weak-ordered memory model archs such
12505          * as IA-64. The following barrier is also mandatory since the FW
12506          * assumes that packets always have BDs.
12507          */
12508         wmb();
12509
12510         fp->tx_db.data.prod += nbd;
12511         barrier();
12512         DOORBELL(bp, fp->index, fp->tx_db.raw);
12513
12514         mmiowb();
12515
12516         fp->tx_bd_prod += nbd;
12517
12518         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12519                 netif_tx_stop_queue(txq);
12520
12521                 /* The paired memory barrier is in bnx2x_tx_int(); we have
12522                  * to keep the ordering of the set_bit() in
12523                  * netif_tx_stop_queue() and the read of fp->tx_bd_cons */
12524                 smp_mb();
12525
12526                 fp->eth_q_stats.driver_xoff++;
12527                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12528                         netif_tx_wake_queue(txq);
12529         }
12530         fp->tx_pkt++;
12531
12532         return NETDEV_TX_OK;
12533 }
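/*
 * Editorial sketch (#if 0, not compiled): the stop/re-check/wake dance
 * above in miniature. The producer stops the queue when the ring looks
 * full, then re-checks after a full barrier paired with one in the TX
 * completion path; without it, the re-check could read a stale
 * consumer index and the queue might stay stopped even though the
 * completion path had just freed space (and, seeing the queue still
 * awake, skipped the wake-up). ring_space() and MAX_NEEDED are
 * hypothetical stand-ins for bnx2x_tx_avail() and MAX_SKB_FRAGS + 3.
 */
#if 0
	if (ring_space(fp) < MAX_NEEDED) {
		netif_tx_stop_queue(txq);	  /* publish XOFF          */
		smp_mb();			  /* pair with consumer mb */
		if (ring_space(fp) >= MAX_NEEDED)
			netif_tx_wake_queue(txq); /* space freed meanwhile */
	}
#endif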
12534
12535 /* called with rtnl_lock */
12536 static int bnx2x_open(struct net_device *dev)
12537 {
12538         struct bnx2x *bp = netdev_priv(dev);
12539
12540         netif_carrier_off(dev);
12541
12542         bnx2x_set_power_state(bp, PCI_D0);
12543
12544         if (!bnx2x_reset_is_done(bp)) {
12545                 do {
12546                         /* Reset the MCP mailbox sequence if there is an
12547                          * ongoing recovery
12548                          */
12549                         bp->fw_seq = 0;
12550
12551                         /* If this is the first function to load and "reset
12552                          * done" is still not set, an unfinished recovery may
12553                          * be in progress. We don't check the attention state,
12554                          * as it may have already been cleared by a "common"
12555                          * reset, but we shall proceed with "process kill".
12556                          */
12557                         if ((bnx2x_get_load_cnt(bp) == 0) &&
12558                                 bnx2x_trylock_hw_lock(bp,
12559                                 HW_LOCK_RESOURCE_RESERVED_08) &&
12560                                 (!bnx2x_leader_reset(bp))) {
12561                                 DP(NETIF_MSG_HW, "Recovered in open\n");
12562                                 break;
12563                         }
12564
12565                         bnx2x_set_power_state(bp, PCI_D3hot);
12566
12567                         printk(KERN_ERR "%s: Recovery flow hasn't been properly"
12568                         " completed yet. Try again later. If you still see this"
12569                         " message after a few retries then a power cycle is"
12570                         " required.\n", bp->dev->name);
12571
12572                         return -EAGAIN;
12573                 } while (0);
12574         }
12575
12576         bp->recovery_state = BNX2X_RECOVERY_DONE;
12577
12578         return bnx2x_nic_load(bp, LOAD_OPEN);
12579 }
12580
12581 /* called with rtnl_lock */
12582 static int bnx2x_close(struct net_device *dev)
12583 {
12584         struct bnx2x *bp = netdev_priv(dev);
12585
12586         /* Unload the driver, release IRQs */
12587         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12588         bnx2x_set_power_state(bp, PCI_D3hot);
12589
12590         return 0;
12591 }
12592
12593 /* called with netif_tx_lock from dev_mcast.c */
12594 static void bnx2x_set_rx_mode(struct net_device *dev)
12595 {
12596         struct bnx2x *bp = netdev_priv(dev);
12597         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12598         int port = BP_PORT(bp);
12599
12600         if (bp->state != BNX2X_STATE_OPEN) {
12601                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12602                 return;
12603         }
12604
12605         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12606
12607         if (dev->flags & IFF_PROMISC)
12608                 rx_mode = BNX2X_RX_MODE_PROMISC;
12609
12610         else if ((dev->flags & IFF_ALLMULTI) ||
12611                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12612                   CHIP_IS_E1(bp)))
12613                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12614
12615         else { /* some multicasts */
12616                 if (CHIP_IS_E1(bp)) {
12617                         int i, old, offset;
12618                         struct netdev_hw_addr *ha;
12619                         struct mac_configuration_cmd *config =
12620                                                 bnx2x_sp(bp, mcast_config);
12621
12622                         i = 0;
12623                         netdev_for_each_mc_addr(ha, dev) {
12624                                 config->config_table[i].
12625                                         cam_entry.msb_mac_addr =
12626                                         swab16(*(u16 *)&ha->addr[0]);
12627                                 config->config_table[i].
12628                                         cam_entry.middle_mac_addr =
12629                                         swab16(*(u16 *)&ha->addr[2]);
12630                                 config->config_table[i].
12631                                         cam_entry.lsb_mac_addr =
12632                                         swab16(*(u16 *)&ha->addr[4]);
12633                                 config->config_table[i].cam_entry.flags =
12634                                                         cpu_to_le16(port);
12635                                 config->config_table[i].
12636                                         target_table_entry.flags = 0;
12637                                 config->config_table[i].target_table_entry.
12638                                         clients_bit_vector =
12639                                                 cpu_to_le32(1 << BP_L_ID(bp));
12640                                 config->config_table[i].
12641                                         target_table_entry.vlan_id = 0;
12642
12643                                 DP(NETIF_MSG_IFUP,
12644                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12645                                    config->config_table[i].
12646                                                 cam_entry.msb_mac_addr,
12647                                    config->config_table[i].
12648                                                 cam_entry.middle_mac_addr,
12649                                    config->config_table[i].
12650                                                 cam_entry.lsb_mac_addr);
12651                                 i++;
12652                         }
12653                         old = config->hdr.length;
12654                         if (old > i) {
12655                                 for (; i < old; i++) {
12656                                         if (CAM_IS_INVALID(config->
12657                                                            config_table[i])) {
12658                                                 /* already invalidated */
12659                                                 break;
12660                                         }
12661                                         /* invalidate */
12662                                         CAM_INVALIDATE(config->
12663                                                        config_table[i]);
12664                                 }
12665                         }
12666
12667                         if (CHIP_REV_IS_SLOW(bp))
12668                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12669                         else
12670                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
12671
12672                         config->hdr.length = i;
12673                         config->hdr.offset = offset;
12674                         config->hdr.client_id = bp->fp->cl_id;
12675                         config->hdr.reserved1 = 0;
12676
12677                         bp->set_mac_pending++;
12678                         smp_wmb();
12679
12680                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12681                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12682                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12683                                       0);
12684                 } else { /* E1H */
12685                         /* Accept one or more multicasts */
12686                         struct netdev_hw_addr *ha;
12687                         u32 mc_filter[MC_HASH_SIZE];
12688                         u32 crc, bit, regidx;
12689                         int i;
12690
12691                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12692
12693                         netdev_for_each_mc_addr(ha, dev) {
12694                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12695                                    ha->addr);
12696
12697                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12698                                 bit = (crc >> 24) & 0xff;
12699                                 regidx = bit >> 5;
12700                                 bit &= 0x1f;
12701                                 mc_filter[regidx] |= (1 << bit);
12702                         }
12703
12704                         for (i = 0; i < MC_HASH_SIZE; i++)
12705                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12706                                        mc_filter[i]);
12707                 }
12708         }
12709
12710         bp->rx_mode = rx_mode;
12711         bnx2x_set_storm_rx_mode(bp);
12712 }
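/*
 * Editorial sketch (#if 0, not part of the driver): the E1H imperfect
 * multicast filter above as a standalone model. The top byte of the
 * CRC32C of the MAC picks one of 256 hash bits spread over the
 * MC_HASH_SIZE 32-bit registers; matching is by hash bucket, not exact
 * address. crc32c() stands in for the kernel's crc32c_le() and is
 * assumed to be provided; EX_MC_HASH_SIZE assumes 8 registers.
 */
#if 0
#include <stdint.h>

#define EX_MC_HASH_SIZE 8	/* 8 regs x 32 bits = 256 buckets */

static void ex_mc_hash_set(uint32_t filter[EX_MC_HASH_SIZE],
			   const uint8_t mac[6])
{
	uint32_t bit = (crc32c(0, mac, 6) >> 24) & 0xff;

	filter[bit >> 5] |= 1u << (bit & 0x1f);	/* reg index, bit index */
}
#endif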
12713
12714 /* called with rtnl_lock */
12715 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12716 {
12717         struct sockaddr *addr = p;
12718         struct bnx2x *bp = netdev_priv(dev);
12719
12720         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12721                 return -EINVAL;
12722
12723         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12724         if (netif_running(dev)) {
12725                 if (CHIP_IS_E1(bp))
12726                         bnx2x_set_eth_mac_addr_e1(bp, 1);
12727                 else
12728                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
12729         }
12730
12731         return 0;
12732 }
12733
12734 /* called with rtnl_lock */
12735 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12736                            int devad, u16 addr)
12737 {
12738         struct bnx2x *bp = netdev_priv(netdev);
12739         u16 value;
12740         int rc;
12741         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12742
12743         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12744            prtad, devad, addr);
12745
12746         if (prtad != bp->mdio.prtad) {
12747                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12748                    prtad, bp->mdio.prtad);
12749                 return -EINVAL;
12750         }
12751
12752         /* The HW expects a different devad if CL22 is used */
12753         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12754
12755         bnx2x_acquire_phy_lock(bp);
12756         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12757                              devad, addr, &value);
12758         bnx2x_release_phy_lock(bp);
12759         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12760
12761         if (!rc)
12762                 rc = value;
12763         return rc;
12764 }
12765
12766 /* called with rtnl_lock */
12767 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12768                             u16 addr, u16 value)
12769 {
12770         struct bnx2x *bp = netdev_priv(netdev);
12771         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12772         int rc;
12773
12774         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12775                            " value 0x%x\n", prtad, devad, addr, value);
12776
12777         if (prtad != bp->mdio.prtad) {
12778                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12779                    prtad, bp->mdio.prtad);
12780                 return -EINVAL;
12781         }
12782
12783         /* The HW expects a different devad if CL22 is used */
12784         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12785
12786         bnx2x_acquire_phy_lock(bp);
12787         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12788                               devad, addr, value);
12789         bnx2x_release_phy_lock(bp);
12790         return rc;
12791 }
12792
12793 /* called with rtnl_lock */
12794 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12795 {
12796         struct bnx2x *bp = netdev_priv(dev);
12797         struct mii_ioctl_data *mdio = if_mii(ifr);
12798
12799         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12800            mdio->phy_id, mdio->reg_num, mdio->val_in);
12801
12802         if (!netif_running(dev))
12803                 return -EAGAIN;
12804
12805         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12806 }
12807
12808 /* called with rtnl_lock */
12809 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12810 {
12811         struct bnx2x *bp = netdev_priv(dev);
12812         int rc = 0;
12813
12814         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12815                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12816                 return -EAGAIN;
12817         }
12818
12819         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12820             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12821                 return -EINVAL;
12822
12823         /* This does not race with packet allocation
12824          * because the actual alloc size is
12825          * only updated as part of load
12826          */
12827         dev->mtu = new_mtu;
12828
12829         if (netif_running(dev)) {
12830                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12831                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12832         }
12833
12834         return rc;
12835 }
12836
12837 static void bnx2x_tx_timeout(struct net_device *dev)
12838 {
12839         struct bnx2x *bp = netdev_priv(dev);
12840
12841 #ifdef BNX2X_STOP_ON_ERROR
12842         if (!bp->panic)
12843                 bnx2x_panic();
12844 #endif
12845         /* This allows the netif to be shut down gracefully before resetting */
12846         schedule_delayed_work(&bp->reset_task, 0);
12847 }
12848
12849 #ifdef BCM_VLAN
12850 /* called with rtnl_lock */
12851 static void bnx2x_vlan_rx_register(struct net_device *dev,
12852                                    struct vlan_group *vlgrp)
12853 {
12854         struct bnx2x *bp = netdev_priv(dev);
12855
12856         bp->vlgrp = vlgrp;
12857
12858         /* Set flags according to the required capabilities */
12859         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12860
12861         if (dev->features & NETIF_F_HW_VLAN_TX)
12862                 bp->flags |= HW_VLAN_TX_FLAG;
12863
12864         if (dev->features & NETIF_F_HW_VLAN_RX)
12865                 bp->flags |= HW_VLAN_RX_FLAG;
12866
12867         if (netif_running(dev))
12868                 bnx2x_set_client_config(bp);
12869 }
12870
12871 #endif
12872
12873 #ifdef CONFIG_NET_POLL_CONTROLLER
12874 static void poll_bnx2x(struct net_device *dev)
12875 {
12876         struct bnx2x *bp = netdev_priv(dev);
12877
12878         disable_irq(bp->pdev->irq);
12879         bnx2x_interrupt(bp->pdev->irq, dev);
12880         enable_irq(bp->pdev->irq);
12881 }
12882 #endif
12883
12884 static const struct net_device_ops bnx2x_netdev_ops = {
12885         .ndo_open               = bnx2x_open,
12886         .ndo_stop               = bnx2x_close,
12887         .ndo_start_xmit         = bnx2x_start_xmit,
12888         .ndo_set_multicast_list = bnx2x_set_rx_mode,
12889         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12890         .ndo_validate_addr      = eth_validate_addr,
12891         .ndo_do_ioctl           = bnx2x_ioctl,
12892         .ndo_change_mtu         = bnx2x_change_mtu,
12893         .ndo_tx_timeout         = bnx2x_tx_timeout,
12894 #ifdef BCM_VLAN
12895         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
12896 #endif
12897 #ifdef CONFIG_NET_POLL_CONTROLLER
12898         .ndo_poll_controller    = poll_bnx2x,
12899 #endif
12900 };
12901
12902 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12903                                     struct net_device *dev)
12904 {
12905         struct bnx2x *bp;
12906         int rc;
12907
12908         SET_NETDEV_DEV(dev, &pdev->dev);
12909         bp = netdev_priv(dev);
12910
12911         bp->dev = dev;
12912         bp->pdev = pdev;
12913         bp->flags = 0;
12914         bp->func = PCI_FUNC(pdev->devfn);
12915
12916         rc = pci_enable_device(pdev);
12917         if (rc) {
12918                 dev_err(&bp->pdev->dev,
12919                         "Cannot enable PCI device, aborting\n");
12920                 goto err_out;
12921         }
12922
12923         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12924                 dev_err(&bp->pdev->dev,
12925                         "Cannot find PCI device base address, aborting\n");
12926                 rc = -ENODEV;
12927                 goto err_out_disable;
12928         }
12929
12930         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12931                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12932                        " base address, aborting\n");
12933                 rc = -ENODEV;
12934                 goto err_out_disable;
12935         }
12936
12937         if (atomic_read(&pdev->enable_cnt) == 1) {
12938                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12939                 if (rc) {
12940                         dev_err(&bp->pdev->dev,
12941                                 "Cannot obtain PCI resources, aborting\n");
12942                         goto err_out_disable;
12943                 }
12944
12945                 pci_set_master(pdev);
12946                 pci_save_state(pdev);
12947         }
12948
12949         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12950         if (bp->pm_cap == 0) {
12951                 dev_err(&bp->pdev->dev,
12952                         "Cannot find power management capability, aborting\n");
12953                 rc = -EIO;
12954                 goto err_out_release;
12955         }
12956
12957         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12958         if (bp->pcie_cap == 0) {
12959                 dev_err(&bp->pdev->dev,
12960                         "Cannot find PCI Express capability, aborting\n");
12961                 rc = -EIO;
12962                 goto err_out_release;
12963         }
12964
12965         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12966                 bp->flags |= USING_DAC_FLAG;
12967                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12968                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12969                                " failed, aborting\n");
12970                         rc = -EIO;
12971                         goto err_out_release;
12972                 }
12973
12974         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12975                 dev_err(&bp->pdev->dev,
12976                         "System does not support DMA, aborting\n");
12977                 rc = -EIO;
12978                 goto err_out_release;
12979         }
12980
12981         dev->mem_start = pci_resource_start(pdev, 0);
12982         dev->base_addr = dev->mem_start;
12983         dev->mem_end = pci_resource_end(pdev, 0);
12984
12985         dev->irq = pdev->irq;
12986
12987         bp->regview = pci_ioremap_bar(pdev, 0);
12988         if (!bp->regview) {
12989                 dev_err(&bp->pdev->dev,
12990                         "Cannot map register space, aborting\n");
12991                 rc = -ENOMEM;
12992                 goto err_out_release;
12993         }
12994
12995         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12996                                         min_t(u64, BNX2X_DB_SIZE,
12997                                               pci_resource_len(pdev, 2)));
12998         if (!bp->doorbells) {
12999                 dev_err(&bp->pdev->dev,
13000                         "Cannot map doorbell space, aborting\n");
13001                 rc = -ENOMEM;
13002                 goto err_out_unmap;
13003         }
13004
13005         bnx2x_set_power_state(bp, PCI_D0);
13006
13007         /* clean indirect addresses */
13008         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13009                                PCICFG_VENDOR_ID_OFFSET);
13010         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
13011         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
13012         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
13013         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
13014
13015         /* Reset the load counter */
13016         bnx2x_clear_load_cnt(bp);
13017
13018         dev->watchdog_timeo = TX_TIMEOUT;
13019
13020         dev->netdev_ops = &bnx2x_netdev_ops;
13021         dev->ethtool_ops = &bnx2x_ethtool_ops;
13022         dev->features |= NETIF_F_SG;
13023         dev->features |= NETIF_F_HW_CSUM;
13024         if (bp->flags & USING_DAC_FLAG)
13025                 dev->features |= NETIF_F_HIGHDMA;
13026         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13027         dev->features |= NETIF_F_TSO6;
13028 #ifdef BCM_VLAN
13029         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13030         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13031
13032         dev->vlan_features |= NETIF_F_SG;
13033         dev->vlan_features |= NETIF_F_HW_CSUM;
13034         if (bp->flags & USING_DAC_FLAG)
13035                 dev->vlan_features |= NETIF_F_HIGHDMA;
13036         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13037         dev->vlan_features |= NETIF_F_TSO6;
13038 #endif
13039
13040         /* get_port_hwinfo() will set prtad and mmds properly */
13041         bp->mdio.prtad = MDIO_PRTAD_NONE;
13042         bp->mdio.mmds = 0;
13043         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13044         bp->mdio.dev = dev;
13045         bp->mdio.mdio_read = bnx2x_mdio_read;
13046         bp->mdio.mdio_write = bnx2x_mdio_write;
13047
13048         return 0;
13049
13050 err_out_unmap:
13051         if (bp->regview) {
13052                 iounmap(bp->regview);
13053                 bp->regview = NULL;
13054         }
13055         if (bp->doorbells) {
13056                 iounmap(bp->doorbells);
13057                 bp->doorbells = NULL;
13058         }
13059
13060 err_out_release:
13061         if (atomic_read(&pdev->enable_cnt) == 1)
13062                 pci_release_regions(pdev);
13063
13064 err_out_disable:
13065         pci_disable_device(pdev);
13066         pci_set_drvdata(pdev, NULL);
13067
13068 err_out:
13069         return rc;
13070 }
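/*
 * Editorial sketch (#if 0, not compiled): the DMA addressing
 * negotiation above. Try 64-bit first and remember the outcome (it
 * later gates NETIF_F_HIGHDMA); fall back to 32-bit; fail the probe
 * only if even that is unavailable. 'using_dac' is a hypothetical
 * local flag standing in for USING_DAC_FLAG.
 */
#if 0
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		using_dac = 1;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
			goto err;	/* streaming ok, coherent not */
	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		goto err;		/* no usable DMA mask at all */
	}
#endif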
13071
13072 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13073                                                  int *width, int *speed)
13074 {
13075         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13076
13077         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13078
13079         /* returned speed: 1 = 2.5GHz, 2 = 5GHz */
13080         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13081 }
13082
13083 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13084 {
13085         const struct firmware *firmware = bp->firmware;
13086         struct bnx2x_fw_file_hdr *fw_hdr;
13087         struct bnx2x_fw_file_section *sections;
13088         u32 offset, len, num_ops;
13089         u16 *ops_offsets;
13090         int i;
13091         const u8 *fw_ver;
13092
13093         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13094                 return -EINVAL;
13095
13096         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13097         sections = (struct bnx2x_fw_file_section *)fw_hdr;
13098
13099         /* Make sure none of the offsets and sizes make us read beyond
13100          * the end of the firmware data */
13101         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13102                 offset = be32_to_cpu(sections[i].offset);
13103                 len = be32_to_cpu(sections[i].len);
13104                 if (offset + len > firmware->size) {
13105                         dev_err(&bp->pdev->dev,
13106                                 "Section %d length is out of bounds\n", i);
13107                         return -EINVAL;
13108                 }
13109         }
13110
13111         /* Likewise for the init_ops offsets */
13112         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13113         ops_offsets = (u16 *)(firmware->data + offset);
13114         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13115
13116         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13117                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13118                         dev_err(&bp->pdev->dev,
13119                                 "Section offset %d is out of bounds\n", i);
13120                         return -EINVAL;
13121                 }
13122         }
13123
13124         /* Check FW version */
13125         offset = be32_to_cpu(fw_hdr->fw_version.offset);
13126         fw_ver = firmware->data + offset;
13127         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13128             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13129             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13130             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13131                 dev_err(&bp->pdev->dev,
13132                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13133                        fw_ver[0], fw_ver[1], fw_ver[2],
13134                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13135                        BCM_5710_FW_MINOR_VERSION,
13136                        BCM_5710_FW_REVISION_VERSION,
13137                        BCM_5710_FW_ENGINEERING_VERSION);
13138                 return -EINVAL;
13139         }
13140
13141         return 0;
13142 }
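/*
 * Editorial note (#if 0): the shape of the validation above. Each
 * section is checked with "offset + len <= size" before anything in it
 * is dereferenced. When offset and len come from an untrusted file, a
 * wrap-proof formulation of the same test is the one sketched below.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static int ex_section_in_bounds(uint32_t offset, uint32_t len, size_t size)
{
	return offset <= size && len <= size - offset;	/* no u32 wrap */
}
#endif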
13143
13144 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13145 {
13146         const __be32 *source = (const __be32 *)_source;
13147         u32 *target = (u32 *)_target;
13148         u32 i;
13149
13150         for (i = 0; i < n/4; i++)
13151                 target[i] = be32_to_cpu(source[i]);
13152 }
13153
13154 /*
13155    Ops array is stored in the following format:
13156    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13157  */
13158 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13159 {
13160         const __be32 *source = (const __be32 *)_source;
13161         struct raw_op *target = (struct raw_op *)_target;
13162         u32 i, j, tmp;
13163
13164         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13165                 tmp = be32_to_cpu(source[j]);
13166                 target[i].op = (tmp >> 24) & 0xff;
13167                 target[i].offset = tmp & 0xffffff;
13168                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13169         }
13170 }
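/*
 * Editorial sketch (#if 0, not part of the driver): decoding one
 * 8-byte ops record in the format described above - op(8 bits) and
 * offset(24 bits) packed into the first big-endian word, raw data in
 * the second. Names are hypothetical.
 */
#if 0
#include <stdint.h>

struct ex_raw_op {
	uint8_t  op;
	uint32_t offset;	/* only 24 bits significant */
	uint32_t raw_data;
};

static void ex_decode_op(const uint8_t *rec, struct ex_raw_op *out)
{
	uint32_t w0 = (uint32_t)rec[0] << 24 | (uint32_t)rec[1] << 16 |
		      (uint32_t)rec[2] << 8  | rec[3];

	out->op = w0 >> 24;
	out->offset = w0 & 0xffffff;
	out->raw_data = (uint32_t)rec[4] << 24 | (uint32_t)rec[5] << 16 |
			(uint32_t)rec[6] << 8  | rec[7];
}
#endif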
13171
13172 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13173 {
13174         const __be16 *source = (const __be16 *)_source;
13175         u16 *target = (u16 *)_target;
13176         u32 i;
13177
13178         for (i = 0; i < n/2; i++)
13179                 target[i] = be16_to_cpu(source[i]);
13180 }
13181
13182 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13183 do {                                                                    \
13184         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13185         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13186         if (!bp->arr) {                                                 \
13187                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13188                 goto lbl;                                               \
13189         }                                                               \
13190         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13191              (u8 *)bp->arr, len);                                       \
13192 } while (0)
13193
13194 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13195 {
13196         const char *fw_file_name;
13197         struct bnx2x_fw_file_hdr *fw_hdr;
13198         int rc;
13199
13200         if (CHIP_IS_E1(bp))
13201                 fw_file_name = FW_FILE_NAME_E1;
13202         else if (CHIP_IS_E1H(bp))
13203                 fw_file_name = FW_FILE_NAME_E1H;
13204         else {
13205                 dev_err(dev, "Unsupported chip revision\n");
13206                 return -EINVAL;
13207         }
13208
13209         dev_info(dev, "Loading %s\n", fw_file_name);
13210
13211         rc = request_firmware(&bp->firmware, fw_file_name, dev);
13212         if (rc) {
13213                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13214                 goto request_firmware_exit;
13215         }
13216
13217         rc = bnx2x_check_firmware(bp);
13218         if (rc) {
13219                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13220                 goto request_firmware_exit;
13221         }
13222
13223         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13224
13225         /* Initialize the pointers to the init arrays */
13226         /* Blob */
13227         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13228
13229         /* Opcodes */
13230         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13231
13232         /* Offsets */
13233         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13234                             be16_to_cpu_n);
13235
13236         /* STORMs firmware */
13237         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13238                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13239         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13240                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13241         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13242                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13243         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13244                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13245         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13246                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13247         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13248                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13249         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13250                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13251         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13252                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13253
13254         return 0;
13255
13256 init_offsets_alloc_err:
13257         kfree(bp->init_ops);
13258 init_ops_alloc_err:
13259         kfree(bp->init_data);
13260 request_firmware_exit:
13261         release_firmware(bp->firmware);
13262
13263         return rc;
13264 }
13265
13266
13267 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13268                                     const struct pci_device_id *ent)
13269 {
13270         struct net_device *dev = NULL;
13271         struct bnx2x *bp;
13272         int pcie_width, pcie_speed;
13273         int rc;
13274
13275         /* dev and its private area are zeroed by alloc_etherdev_mq */
13276         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13277         if (!dev) {
13278                 dev_err(&pdev->dev, "Cannot allocate net device\n");
13279                 return -ENOMEM;
13280         }
13281
13282         bp = netdev_priv(dev);
13283         bp->msg_enable = debug;
13284
13285         pci_set_drvdata(pdev, dev);
13286
13287         rc = bnx2x_init_dev(pdev, dev);
13288         if (rc < 0) {
13289                 free_netdev(dev);
13290                 return rc;
13291         }
13292
13293         rc = bnx2x_init_bp(bp);
13294         if (rc)
13295                 goto init_one_exit;
13296
13297         /* Set init arrays */
13298         rc = bnx2x_init_firmware(bp, &pdev->dev);
13299         if (rc) {
13300                 dev_err(&pdev->dev, "Error loading firmware\n");
13301                 goto init_one_exit;
13302         }
13303
13304         rc = register_netdev(dev);
13305         if (rc) {
13306                 dev_err(&pdev->dev, "Cannot register net device\n");
13307                 goto init_one_exit;
13308         }
13309
13310         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13311         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13312                " IRQ %d, ", board_info[ent->driver_data].name,
13313                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13314                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13315                dev->base_addr, bp->pdev->irq);
13316         pr_cont("node addr %pM\n", dev->dev_addr);
13317
13318         return 0;
13319
13320 init_one_exit:
13321         if (bp->regview)
13322                 iounmap(bp->regview);
13323
13324         if (bp->doorbells)
13325                 iounmap(bp->doorbells);
13326
13327         free_netdev(dev);
13328
13329         if (atomic_read(&pdev->enable_cnt) == 1)
13330                 pci_release_regions(pdev);
13331
13332         pci_disable_device(pdev);
13333         pci_set_drvdata(pdev, NULL);
13334
13335         return rc;
13336 }
13337
13338 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13339 {
13340         struct net_device *dev = pci_get_drvdata(pdev);
13341         struct bnx2x *bp;
13342
13343         if (!dev) {
13344                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13345                 return;
13346         }
13347         bp = netdev_priv(dev);
13348
13349         unregister_netdev(dev);
13350
13351         /* Make sure RESET task is not scheduled before continuing */
13352         cancel_delayed_work_sync(&bp->reset_task);
13353
13354         kfree(bp->init_ops_offsets);
13355         kfree(bp->init_ops);
13356         kfree(bp->init_data);
13357         release_firmware(bp->firmware);
13358
13359         if (bp->regview)
13360                 iounmap(bp->regview);
13361
13362         if (bp->doorbells)
13363                 iounmap(bp->doorbells);
13364
13365         free_netdev(dev);
13366
13367         if (atomic_read(&pdev->enable_cnt) == 1)
13368                 pci_release_regions(pdev);
13369
13370         pci_disable_device(pdev);
13371         pci_set_drvdata(pdev, NULL);
13372 }
13373
13374 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13375 {
13376         struct net_device *dev = pci_get_drvdata(pdev);
13377         struct bnx2x *bp;
13378
13379         if (!dev) {
13380                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13381                 return -ENODEV;
13382         }
13383         bp = netdev_priv(dev);
13384
13385         rtnl_lock();
13386
13387         pci_save_state(pdev);
13388
13389         if (!netif_running(dev)) {
13390                 rtnl_unlock();
13391                 return 0;
13392         }
13393
13394         netif_device_detach(dev);
13395
13396         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13397
13398         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13399
13400         rtnl_unlock();
13401
13402         return 0;
13403 }
13404
13405 static int bnx2x_resume(struct pci_dev *pdev)
13406 {
13407         struct net_device *dev = pci_get_drvdata(pdev);
13408         struct bnx2x *bp;
13409         int rc;
13410
13411         if (!dev) {
13412                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13413                 return -ENODEV;
13414         }
13415         bp = netdev_priv(dev);
13416
13417         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13418                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13419                 return -EAGAIN;
13420         }
13421
13422         rtnl_lock();
13423
13424         pci_restore_state(pdev);
13425
13426         if (!netif_running(dev)) {
13427                 rtnl_unlock();
13428                 return 0;
13429         }
13430
13431         bnx2x_set_power_state(bp, PCI_D0);
13432         netif_device_attach(dev);
13433
13434         rc = bnx2x_nic_load(bp, LOAD_OPEN);
13435
13436         rtnl_unlock();
13437
13438         return rc;
13439 }
13440
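/**
 * bnx2x_eeh_nic_unload - minimal unload path used during PCI error handling
 * @bp: driver handle
 *
 * A stripped-down variant of the regular unload: since the device may
 * be unreachable, only software state is torn down - IRQs, the timer,
 * SKBs, SGEs, NAPI contexts and driver memory - plus invalidation of
 * the cached E1 MAC CAM configuration.
 */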
13441 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13442 {
13443         int i;
13444
13445         bp->state = BNX2X_STATE_ERROR;
13446
13447         bp->rx_mode = BNX2X_RX_MODE_NONE;
13448
13449         bnx2x_netif_stop(bp, 0);
13450         netif_carrier_off(bp->dev);
13451
13452         del_timer_sync(&bp->timer);
13453         bp->stats_state = STATS_STATE_DISABLED;
13454         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13455
13456         /* Release IRQs */
13457         bnx2x_free_irq(bp, false);
13458
13459         if (CHIP_IS_E1(bp)) {
13460                 struct mac_configuration_cmd *config =
13461                                                 bnx2x_sp(bp, mcast_config);
13462
13463                 for (i = 0; i < config->hdr.length; i++)
13464                         CAM_INVALIDATE(config->config_table[i]);
13465         }
13466
13467         /* Free SKBs, SGEs, TPA pool and driver internals */
13468         bnx2x_free_skbs(bp);
13469         for_each_queue(bp, i)
13470                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13471         for_each_queue(bp, i)
13472                 netif_napi_del(&bnx2x_fp(bp, i, napi));
13473         bnx2x_free_mem(bp);
13474
13475         bp->state = BNX2X_STATE_CLOSED;
13476
13477         return 0;
13478 }
13479
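/**
 * bnx2x_eeh_recover - re-establish contact with the MCP after a PCI error
 * @bp: driver handle
 *
 * Re-reads the shared memory base from the chip, validates the MCP
 * signature and resynchronizes the firmware sequence number so that
 * driver/MCP handshakes can resume after the slot reset.
 */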
13480 static void bnx2x_eeh_recover(struct bnx2x *bp)
13481 {
13482         u32 val;
13483
13484         mutex_init(&bp->port.phy_mutex);
13485
13486         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13487         bp->link_params.shmem_base = bp->common.shmem_base;
13488         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13489
13490         if (!bp->common.shmem_base ||
13491             (bp->common.shmem_base < 0xA0000) ||
13492             (bp->common.shmem_base >= 0xC0000)) {
13493                 BNX2X_DEV_INFO("MCP not active\n");
13494                 bp->flags |= NO_MCP_FLAG;
13495                 return;
13496         }
13497
13498         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13499         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13500                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13501                 BNX2X_ERR("BAD MCP validity signature\n");
13502
13503         if (!BP_NOMCP(bp)) {
13504                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13505                               & DRV_MSG_SEQ_NUMBER_MASK);
13506                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13507         }
13508 }
13509
13510 /**
13511  * bnx2x_io_error_detected - called when PCI error is detected
13512  * @pdev: Pointer to PCI device
13513  * @state: The current pci connection state
13514  *
13515  * This function is called after a PCI bus error affecting
13516  * this device has been detected.
13517  */
13518 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13519                                                 pci_channel_state_t state)
13520 {
13521         struct net_device *dev = pci_get_drvdata(pdev);
13522         struct bnx2x *bp = netdev_priv(dev);
13523
13524         rtnl_lock();
13525
13526         netif_device_detach(dev);
13527
13528         if (state == pci_channel_io_perm_failure) {
13529                 rtnl_unlock();
13530                 return PCI_ERS_RESULT_DISCONNECT;
13531         }
13532
13533         if (netif_running(dev))
13534                 bnx2x_eeh_nic_unload(bp);
13535
13536         pci_disable_device(pdev);
13537
13538         rtnl_unlock();
13539
13540         /* Request a slot reset */
13541         return PCI_ERS_RESULT_NEED_RESET;
13542 }
13543
13544 /**
13545  * bnx2x_io_slot_reset - called after the PCI bus has been reset
13546  * @pdev: Pointer to PCI device
13547  *
13548  * Restart the card from scratch, as if from a cold-boot.
13549  */
13550 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13551 {
13552         struct net_device *dev = pci_get_drvdata(pdev);
13553         struct bnx2x *bp = netdev_priv(dev);
13554
13555         rtnl_lock();
13556
13557         if (pci_enable_device(pdev)) {
13558                 dev_err(&pdev->dev,
13559                         "Cannot re-enable PCI device after reset\n");
13560                 rtnl_unlock();
13561                 return PCI_ERS_RESULT_DISCONNECT;
13562         }
13563
13564         pci_set_master(pdev);
13565         pci_restore_state(pdev);
13566
13567         if (netif_running(dev))
13568                 bnx2x_set_power_state(bp, PCI_D0);
13569
13570         rtnl_unlock();
13571
13572         return PCI_ERS_RESULT_RECOVERED;
13573 }
13574
13575 /**
13576  * bnx2x_io_resume - called when traffic can start flowing again
13577  * @pdev: Pointer to PCI device
13578  *
13579  * This callback is called when the error recovery driver tells us that
13580  * it's OK to resume normal operation.
13581  */
13582 static void bnx2x_io_resume(struct pci_dev *pdev)
13583 {
13584         struct net_device *dev = pci_get_drvdata(pdev);
13585         struct bnx2x *bp = netdev_priv(dev);
13586
13587         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13588                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13589                 return;
13590         }
13591
13592         rtnl_lock();
13593
13594         bnx2x_eeh_recover(bp);
13595
13596         if (netif_running(dev))
13597                 bnx2x_nic_load(bp, LOAD_NORMAL);
13598
13599         netif_device_attach(dev);
13600
13601         rtnl_unlock();
13602 }
13603
13604 static struct pci_error_handlers bnx2x_err_handler = {
13605         .error_detected = bnx2x_io_error_detected,
13606         .slot_reset     = bnx2x_io_slot_reset,
13607         .resume         = bnx2x_io_resume,
13608 };
13609
13610 static struct pci_driver bnx2x_pci_driver = {
13611         .name        = DRV_MODULE_NAME,
13612         .id_table    = bnx2x_pci_tbl,
13613         .probe       = bnx2x_init_one,
13614         .remove      = __devexit_p(bnx2x_remove_one),
13615         .suspend     = bnx2x_suspend,
13616         .resume      = bnx2x_resume,
13617         .err_handler = &bnx2x_err_handler,
13618 };
13619
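/* Module entry point: print the version banner, create the slowpath
 * workqueue and register the PCI driver, destroying the workqueue again
 * if registration fails.
 */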
13620 static int __init bnx2x_init(void)
13621 {
13622         int ret;
13623
13624         pr_info("%s", version);
13625
13626         bnx2x_wq = create_singlethread_workqueue("bnx2x");
13627         if (bnx2x_wq == NULL) {
13628                 pr_err("Cannot create workqueue\n");
13629                 return -ENOMEM;
13630         }
13631
13632         ret = pci_register_driver(&bnx2x_pci_driver);
13633         if (ret) {
13634                 pr_err("Cannot register driver\n");
13635                 destroy_workqueue(bnx2x_wq);
13636         }
13637         return ret;
13638 }
13639
13640 static void __exit bnx2x_cleanup(void)
13641 {
13642         pci_unregister_driver(&bnx2x_pci_driver);
13643
13644         destroy_workqueue(bnx2x_wq);
13645 }
13646
13647 module_init(bnx2x_init);
13648 module_exit(bnx2x_cleanup);
13649
13650 #ifdef BCM_CNIC
13651
13652 /* count denotes the number of new completions we have seen */
13653 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13654 {
13655         struct eth_spe *spe;
13656
13657 #ifdef BNX2X_STOP_ON_ERROR
13658         if (unlikely(bp->panic))
13659                 return;
13660 #endif
13661
13662         spin_lock_bh(&bp->spq_lock);
13663         bp->cnic_spq_pending -= count;
13664
13665         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13666              bp->cnic_spq_pending++) {
13667
13668                 if (!bp->cnic_kwq_pending)
13669                         break;
13670
13671                 spe = bnx2x_sp_get_next(bp);
13672                 *spe = *bp->cnic_kwq_cons;
13673
13674                 bp->cnic_kwq_pending--;
13675
13676                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13677                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13678
13679                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13680                         bp->cnic_kwq_cons = bp->cnic_kwq;
13681                 else
13682                         bp->cnic_kwq_cons++;
13683         }
13684         bnx2x_sp_prod_update(bp);
13685         spin_unlock_bh(&bp->spq_lock);
13686 }
13687
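/**
 * bnx2x_cnic_sp_queue - queue 16-byte KWQEs from CNIC for slowpath posting
 * @dev: net device
 * @kwqes: array of KWQEs to queue
 * @count: number of entries in @kwqes
 *
 * Copies as many KWQEs as fit (at most MAX_SP_DESC_CNT outstanding)
 * onto the CNIC ring and, if the SPQ has room, kicks
 * bnx2x_cnic_sp_post() to forward them.  Returns the number of KWQEs
 * actually accepted.
 */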
13688 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13689                                struct kwqe_16 *kwqes[], u32 count)
13690 {
13691         struct bnx2x *bp = netdev_priv(dev);
13692         int i;
13693
13694 #ifdef BNX2X_STOP_ON_ERROR
13695         if (unlikely(bp->panic))
13696                 return -EIO;
13697 #endif
13698
13699         spin_lock_bh(&bp->spq_lock);
13700
13701         for (i = 0; i < count; i++) {
13702                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13703
13704                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13705                         break;
13706
13707                 *bp->cnic_kwq_prod = *spe;
13708
13709                 bp->cnic_kwq_pending++;
13710
13711                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13712                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
13713                    spe->data.mac_config_addr.hi,
13714                    spe->data.mac_config_addr.lo,
13715                    bp->cnic_kwq_pending);
13716
13717                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13718                         bp->cnic_kwq_prod = bp->cnic_kwq;
13719                 else
13720                         bp->cnic_kwq_prod++;
13721         }
13722
13723         spin_unlock_bh(&bp->spq_lock);
13724
13725         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13726                 bnx2x_cnic_sp_post(bp, 0);
13727
13728         return i;
13729 }
13730
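/* Deliver a control event to the CNIC driver; cnic_mutex makes this
 * safe only from process context.
 */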
13731 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13732 {
13733         struct cnic_ops *c_ops;
13734         int rc = 0;
13735
13736         mutex_lock(&bp->cnic_mutex);
13737         c_ops = bp->cnic_ops;
13738         if (c_ops)
13739                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13740         mutex_unlock(&bp->cnic_mutex);
13741
13742         return rc;
13743 }
13744
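/* As bnx2x_cnic_ctl_send(), but protected by RCU instead of the mutex
 * so it may be called from BH context.
 */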
13745 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13746 {
13747         struct cnic_ops *c_ops;
13748         int rc = 0;
13749
13750         rcu_read_lock();
13751         c_ops = rcu_dereference(bp->cnic_ops);
13752         if (c_ops)
13753                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13754         rcu_read_unlock();
13755
13756         return rc;
13757 }
13758
13759 /*
13760  * for commands that have no data
13761  */
13762 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13763 {
13764         struct cnic_ctl_info ctl = {0};
13765
13766         ctl.cmd = cmd;
13767
13768         return bnx2x_cnic_ctl_send(bp, &ctl);
13769 }
13770
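/* Forward a CFC completion for connection 'cid' to CNIC, then account
 * for it on the SPQ.
 */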
13771 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13772 {
13773         struct cnic_ctl_info ctl;
13774
13775         /* first we tell CNIC and only then we count this as a completion */
13776         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13777         ctl.data.comp.cid = cid;
13778
13779         bnx2x_cnic_ctl_send_bh(bp, &ctl);
13780         bnx2x_cnic_sp_post(bp, 1);
13781 }
13782
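/**
 * bnx2x_drv_ctl - control entry point exported to the CNIC driver
 * @dev: net device
 * @ctl: command code and payload
 *
 * Dispatches CNIC requests: ILT context-table writes, SPQ completion
 * accounting, and starting or stopping an L2 client's RX mode (the L2
 * commands expect rtnl_lock to be held by the caller).
 */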
13783 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13784 {
13785         struct bnx2x *bp = netdev_priv(dev);
13786         int rc = 0;
13787
13788         switch (ctl->cmd) {
13789         case DRV_CTL_CTXTBL_WR_CMD: {
13790                 u32 index = ctl->data.io.offset;
13791                 dma_addr_t addr = ctl->data.io.dma_addr;
13792
13793                 bnx2x_ilt_wr(bp, index, addr);
13794                 break;
13795         }
13796
13797         case DRV_CTL_COMPLETION_CMD: {
13798                 int count = ctl->data.comp.comp_count;
13799
13800                 bnx2x_cnic_sp_post(bp, count);
13801                 break;
13802         }
13803
13804         /* rtnl_lock is held.  */
13805         case DRV_CTL_START_L2_CMD: {
13806                 u32 cli = ctl->data.ring.client_id;
13807
13808                 bp->rx_mode_cl_mask |= (1 << cli);
13809                 bnx2x_set_storm_rx_mode(bp);
13810                 break;
13811         }
13812
13813         /* rtnl_lock is held.  */
13814         case DRV_CTL_STOP_L2_CMD: {
13815                 u32 cli = ctl->data.ring.client_id;
13816
13817                 bp->rx_mode_cl_mask &= ~(1 << cli);
13818                 bnx2x_set_storm_rx_mode(bp);
13819                 break;
13820         }
13821
13822         default:
13823                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13824                 rc = -EINVAL;
13825         }
13826
13827         return rc;
13828 }
13829
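/* Describe the interrupt setup to CNIC: under MSI-X, CNIC is handed
 * its own vector (entry 1 of the MSI-X table); in either case it gets
 * both the CNIC and the default status blocks.
 */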
13830 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13831 {
13832         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13833
13834         if (bp->flags & USING_MSIX_FLAG) {
13835                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13836                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13837                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13838         } else {
13839                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13840                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13841         }
13842         cp->irq_arr[0].status_blk = bp->cnic_sb;
13843         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13844         cp->irq_arr[1].status_blk = bp->def_status_blk;
13845         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13846
13847         cp->num_irq = 2;
13848 }
13849
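/**
 * bnx2x_register_cnic - attach the CNIC driver to this device
 * @dev: net device
 * @ops: CNIC callback table
 * @data: opaque CNIC context handed back through the callbacks
 *
 * Allocates the KWQE ring, initializes the CNIC status block and IRQ
 * info, programs the iSCSI MAC and finally publishes @ops via RCU.
 * Fails with -EBUSY while interrupts are disabled (intr_sem raised).
 */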
13850 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13851                                void *data)
13852 {
13853         struct bnx2x *bp = netdev_priv(dev);
13854         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13855
13856         if (ops == NULL)
13857                 return -EINVAL;
13858
13859         if (atomic_read(&bp->intr_sem) != 0)
13860                 return -EBUSY;
13861
13862         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13863         if (!bp->cnic_kwq)
13864                 return -ENOMEM;
13865
13866         bp->cnic_kwq_cons = bp->cnic_kwq;
13867         bp->cnic_kwq_prod = bp->cnic_kwq;
13868         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13869
13870         bp->cnic_spq_pending = 0;
13871         bp->cnic_kwq_pending = 0;
13872
13873         bp->cnic_data = data;
13874
13875         cp->num_irq = 0;
13876         cp->drv_state = CNIC_DRV_STATE_REGD;
13877
13878         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13879
13880         bnx2x_setup_cnic_irq_info(bp);
13881         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13882         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13883         rcu_assign_pointer(bp->cnic_ops, ops);
13884
13885         return 0;
13886 }
13887
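/**
 * bnx2x_unregister_cnic - detach the CNIC driver from this device
 * @dev: net device
 *
 * Clears the iSCSI MAC if it was set, unpublishes the ops pointer and,
 * after an RCU grace period has elapsed, frees the KWQE ring.
 */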
13888 static int bnx2x_unregister_cnic(struct net_device *dev)
13889 {
13890         struct bnx2x *bp = netdev_priv(dev);
13891         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13892
13893         mutex_lock(&bp->cnic_mutex);
13894         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13895                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13896                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13897         }
13898         cp->drv_state = 0;
13899         rcu_assign_pointer(bp->cnic_ops, NULL);
13900         mutex_unlock(&bp->cnic_mutex);
13901         synchronize_rcu();
13902         kfree(bp->cnic_kwq);
13903         bp->cnic_kwq = NULL;
13904
13905         return 0;
13906 }
13907
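/**
 * bnx2x_cnic_probe - export the bnx2x glue needed by the CNIC driver
 * @dev: net device
 *
 * Fills the cnic_eth_dev structure with the chip ID, register windows,
 * ILT layout and the driver entry points defined above.
 *
 * A consumer is expected to use this roughly as follows, where my_ops,
 * my_data, kwqes and n are the caller's own (a sketch, not the exact
 * cnic.c flow):
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *
 *	if (cp && !cp->drv_register_cnic(netdev, my_ops, my_data))
 *		cp->drv_submit_kwqes_16(netdev, kwqes, n);
 */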
13908 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13909 {
13910         struct bnx2x *bp = netdev_priv(dev);
13911         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13912
13913         cp->drv_owner = THIS_MODULE;
13914         cp->chip_id = CHIP_ID(bp);
13915         cp->pdev = bp->pdev;
13916         cp->io_base = bp->regview;
13917         cp->io_base2 = bp->doorbells;
13918         cp->max_kwqe_pending = 8;
13919         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13920         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13921         cp->ctx_tbl_len = CNIC_ILT_LINES;
13922         cp->starting_cid = BCM_CNIC_CID_START;
13923         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13924         cp->drv_ctl = bnx2x_drv_ctl;
13925         cp->drv_register_cnic = bnx2x_register_cnic;
13926         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13927
13928         return cp;
13929 }
13930 EXPORT_SYMBOL(bnx2x_cnic_probe);
13931
13932 #endif /* BCM_CNIC */
13933