/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
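
/* The tg3_flag() accessors paste the short flag name onto the TG3_FLAG_
 * prefix at compile time, so, for example,
 *
 *	tg3_flag(tp, JUMBO_CAPABLE)
 *
 * expands to _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags) and
 * becomes an atomic test_bit() on the flag bitmap.
 */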
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
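
/* Because TG3_TX_RING_SIZE is a power of two, NEXT_TX() can wrap the
 * producer index with a mask instead of a modulo, e.g.
 * NEXT_TX(510) == 511 and NEXT_TX(511) == 0.
 */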
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
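
/* With the default tx_pending of 511, the queue is therefore not woken
 * until at least 127 descriptors (one quarter of the ring) are free again.
 */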
#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
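
/* When the MMIO register window cannot be trusted (see the
 * PCIX_TARGET_HWBUG and ICH_WORKAROUND flags), registers are reached
 * indirectly through a pair of PCI config-space windows:
 * TG3PCI_REG_BASE_ADDR selects the register offset and TG3PCI_REG_DATA
 * moves the data.  indirect_lock serializes users of the shared window.
 */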
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
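
/* Mailbox registers are also reachable through the config-space window;
 * the 0x5600 added below rebases the mailbox offset into the GRC mailbox
 * mirror region (GRCMBOX_BASE) of the register map.
 */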
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (tp->grc_local_ctrl & GRC_LCLCTRL_SETINT)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) {
		/* Non-posted methods */
		tp->write32(tp, off, val);
	} else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}

	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
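
/* Accessor convention: tw32()/tr32() are the plain register write/read
 * hooks, the _f variants flush the write by reading the register back,
 * and tw32_wait_f() additionally delays for registers (GRC_LOCAL_CTRL,
 * TG3PCI_CLOCK_CTRL) that are unsafe to read back immediately.
 */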
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
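
/* APE locks arbitrate resources shared between the driver and the
 * on-chip application processor (APE) firmware.  A lock is requested by
 * writing APE_LOCK_REQ_DRIVER to the per-lock request register and is
 * held once the grant register reads back the same driver bit; on
 * timeout the request is revoked through the grant register.
 */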
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
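
/* PHY registers are reached through the MAC's MII management interface:
 * a frame containing the PHY address, register number, opcode, and (for
 * writes) the data is loaded into MAC_MI_COM, and MI_COM_BUSY is polled
 * (up to PHY_BUSY_LOOPS iterations) until the serial transaction
 * completes.
 */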
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
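
/* Callers bracket DSP register accesses with these two macros:
 * SMDSP_ENABLE opens the shadowed DSP register space and SMDSP_DISABLE
 * restores normal operation.  Typical usage:
 *
 *	if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *		tg3_phydsp_write(tp, reg, val);
 *		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *	}
 */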
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500
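
/* Driver-to-firmware events: the driver writes a command into the
 * NIC_SRAM_FW_CMD_* mailboxes and then sets GRC_RX_CPU_DRIVER_EVENT to
 * interrupt the RX CPU firmware.  The firmware acknowledges by clearing
 * the event bit, which tg3_wait_for_event_ack() below polls for
 * (bounded by TG3_FW_EVENT_TIMEOUT_USEC).
 */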
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
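
/* Resolve the TX/RX pause configuration from the local and remote
 * 1000BASE-X autoneg words per the standard pause resolution table
 * (IEEE 802.3 Annex 28B): symmetric pause on both sides enables flow
 * control in both directions, while the asymmetric bits select
 * one-way pause.
 */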
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1944 for (chan = 0; chan < 4; chan++) {
1947 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1948 (chan * 0x2000) | 0x0200);
1949 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1950 for (i = 0; i < 6; i++)
1951 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1952 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1953 if (tg3_wait_macro_done(tp))
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2169 static void tg3_frob_aux_power(struct tg3 *tp)
2171 bool need_vaux = false;
2173 /* The GPIOs do something completely different on 57765. */
2174 if (!tg3_flag(tp, IS_NIC) ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2179 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2181 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2183 tp->pdev_peer != tp->pdev) {
2184 struct net_device *dev_peer;
2186 dev_peer = pci_get_drvdata(tp->pdev_peer);
2188 /* remove_one() may have been run on the peer. */
2190 struct tg3 *tp_peer = netdev_priv(dev_peer);
2192 if (tg3_flag(tp_peer, INIT_COMPLETE))
2195 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2196 tg3_flag(tp_peer, ENABLE_ASF))
2201 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2206 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2207 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2208 (GRC_LCLCTRL_GPIO_OE0 |
2209 GRC_LCLCTRL_GPIO_OE1 |
2210 GRC_LCLCTRL_GPIO_OE2 |
2211 GRC_LCLCTRL_GPIO_OUTPUT0 |
2212 GRC_LCLCTRL_GPIO_OUTPUT1),
2214 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2215 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2216 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2217 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2218 GRC_LCLCTRL_GPIO_OE1 |
2219 GRC_LCLCTRL_GPIO_OE2 |
2220 GRC_LCLCTRL_GPIO_OUTPUT0 |
2221 GRC_LCLCTRL_GPIO_OUTPUT1 |
2223 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2225 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2226 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2228 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2229 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2232 u32 grc_local_ctrl = 0;
2234 /* Workaround to prevent overdrawing Amps. */
2235 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2237 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2238 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2239 grc_local_ctrl, 100);
2242 /* On 5753 and variants, GPIO2 cannot be used. */
2243 no_gpio2 = tp->nic_sram_data_cfg &
2244 NIC_SRAM_DATA_CFG_NO_GPIO2;
2246 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2247 GRC_LCLCTRL_GPIO_OE1 |
2248 GRC_LCLCTRL_GPIO_OE2 |
2249 GRC_LCLCTRL_GPIO_OUTPUT1 |
2250 GRC_LCLCTRL_GPIO_OUTPUT2;
2252 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2253 GRC_LCLCTRL_GPIO_OUTPUT2);
2255 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2256 grc_local_ctrl, 100);
2258 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2261 grc_local_ctrl, 100);
2264 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2265 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266 grc_local_ctrl, 100);
2270 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2271 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2272 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273 (GRC_LCLCTRL_GPIO_OE1 |
2274 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2276 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2277 GRC_LCLCTRL_GPIO_OE1, 100);
2279 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2280 (GRC_LCLCTRL_GPIO_OE1 |
2281 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
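/* The helper below decides when the MAC's LINK_POLARITY bit must be
 * set on 5700-class chips (a hedged summary of its checks): always in
 * LED_CTRL_MODE_PHY_2 mode, for BCM5411 PHYs at any speed other than
 * 10 Mbps, and for all other PHYs at 10 Mbps only.
 */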
2286 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2288 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2290 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2291 if (speed != SPEED_10)
2293 } else if (speed == SPEED_10)
2299 static int tg3_setup_phy(struct tg3 *, int);
2301 #define RESET_KIND_SHUTDOWN 0
2302 #define RESET_KIND_INIT 1
2303 #define RESET_KIND_SUSPEND 2
2305 static void tg3_write_sig_post_reset(struct tg3 *, int);
2306 static int tg3_halt_cpu(struct tg3 *, u32);
2308 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2312 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2314 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2315 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2318 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2319 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2320 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2327 val = tr32(GRC_MISC_CFG);
2328 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2331 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2333 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2336 tg3_writephy(tp, MII_ADVERTISE, 0);
2337 tg3_writephy(tp, MII_BMCR,
2338 BMCR_ANENABLE | BMCR_ANRESTART);
2340 tg3_writephy(tp, MII_TG3_FET_TEST,
2341 phytest | MII_TG3_FET_SHADOW_EN);
2342 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2343 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2345 MII_TG3_FET_SHDW_AUXMODE4,
2348 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2351 } else if (do_low_power) {
2352 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2353 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2355 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2356 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2357 MII_TG3_AUXCTL_PCTL_VREG_11V;
2358 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2361 /* The PHY should not be powered down on some chips because
2362  * of bootcode programming.
2363  */
2364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2366 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2367 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2370 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2371 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2372 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2373 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2374 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2375 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2378 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2381 /* tp->lock is held. */
2382 static int tg3_nvram_lock(struct tg3 *tp)
2384 if (tg3_flag(tp, NVRAM)) {
2387 if (tp->nvram_lock_cnt == 0) {
2388 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2389 for (i = 0; i < 8000; i++) {
2390 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2395 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2399 tp->nvram_lock_cnt++;
2404 /* tp->lock is held. */
2405 static void tg3_nvram_unlock(struct tg3 *tp)
2407 if (tg3_flag(tp, NVRAM)) {
2408 if (tp->nvram_lock_cnt > 0)
2409 tp->nvram_lock_cnt--;
2410 if (tp->nvram_lock_cnt == 0)
2411 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
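/* Usage sketch (illustrative, mirroring tg3_nvram_read() below): the
 * software arbitration lock brackets every NVRAM access, and
 * nvram_lock_cnt lets the lock nest so helpers may re-take it.
 *
 *	if (tg3_nvram_lock(tp) == 0) {
 *		tg3_enable_nvram_access(tp);
 *		... access NVRAM ...
 *		tg3_disable_nvram_access(tp);
 *		tg3_nvram_unlock(tp);
 *	}
 */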
2415 /* tp->lock is held. */
2416 static void tg3_enable_nvram_access(struct tg3 *tp)
2418 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2419 u32 nvaccess = tr32(NVRAM_ACCESS);
2421 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2425 /* tp->lock is held. */
2426 static void tg3_disable_nvram_access(struct tg3 *tp)
2428 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2429 u32 nvaccess = tr32(NVRAM_ACCESS);
2431 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2435 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2436 u32 offset, u32 *val)
2441 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2444 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2445 EEPROM_ADDR_DEVID_MASK |
2447 tw32(GRC_EEPROM_ADDR,
2449 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2450 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2451 EEPROM_ADDR_ADDR_MASK) |
2452 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2454 for (i = 0; i < 1000; i++) {
2455 tmp = tr32(GRC_EEPROM_ADDR);
2457 if (tmp & EEPROM_ADDR_COMPLETE)
2461 if (!(tmp & EEPROM_ADDR_COMPLETE))
2464 tmp = tr32(GRC_EEPROM_DATA);
2466 /*
2467  * The data will always be opposite the native endian
2468  * format.  Perform a blind byteswap to compensate.
2469  */
2470 *val = swab32(tmp);
2475 #define NVRAM_CMD_TIMEOUT 10000
2477 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2481 tw32(NVRAM_CMD, nvram_cmd);
2482 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2484 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2490 if (i == NVRAM_CMD_TIMEOUT)
2496 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2498 if (tg3_flag(tp, NVRAM) &&
2499 tg3_flag(tp, NVRAM_BUFFERED) &&
2500 tg3_flag(tp, FLASH) &&
2501 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2502 (tp->nvram_jedecnum == JEDEC_ATMEL))
2504 addr = ((addr / tp->nvram_pagesize) <<
2505 ATMEL_AT45DB0X1B_PAGE_POS) +
2506 (addr % tp->nvram_pagesize);
2511 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2513 if (tg3_flag(tp, NVRAM) &&
2514 tg3_flag(tp, NVRAM_BUFFERED) &&
2515 tg3_flag(tp, FLASH) &&
2516 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2517 (tp->nvram_jedecnum == JEDEC_ATMEL))
2519 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2520 tp->nvram_pagesize) +
2521 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
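/* Worked example (illustrative): assuming the 264-byte page size of
 * these Atmel parts and ATMEL_AT45DB0X1B_PAGE_POS == 9, logical
 * offset 600 is page 600 / 264 == 2 at byte 600 % 264 == 72, giving
 * physical address (2 << 9) + 72 == 1096.  tg3_nvram_logical_addr()
 * below is the exact inverse of this mapping.
 */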
2526 /* NOTE: Data read in from NVRAM is byteswapped according to
2527 * the byteswapping settings for all other register accesses.
2528 * tg3 devices are BE devices, so on a BE machine, the data
2529 * returned will be exactly as it is seen in NVRAM. On a LE
2530  * machine, the 32-bit value will be byteswapped.
2531  */
2532 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2536 if (!tg3_flag(tp, NVRAM))
2537 return tg3_nvram_read_using_eeprom(tp, offset, val);
2539 offset = tg3_nvram_phys_addr(tp, offset);
2541 if (offset > NVRAM_ADDR_MSK)
2544 ret = tg3_nvram_lock(tp);
2548 tg3_enable_nvram_access(tp);
2550 tw32(NVRAM_ADDR, offset);
2551 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2552 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2555 *val = tr32(NVRAM_RDDATA);
2557 tg3_disable_nvram_access(tp);
2559 tg3_nvram_unlock(tp);
2564 /* Ensures NVRAM data is in bytestream format. */
2565 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2568 int res = tg3_nvram_read(tp, offset, &v);
2570 *val = cpu_to_be32(v);
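/* Minimal sketch (not part of the driver): reading a small NVRAM
 * block into a byte buffer.  Because tg3_nvram_read_be32() returns
 * bytestream data, the bytes land in buf[] in NVRAM order on both
 * little- and big-endian hosts.  The offset 0x10 and the 16-byte
 * length are hypothetical.
 */
static int tg3_nvram_read_block_example(struct tg3 *tp, u8 *buf)
{
	__be32 *p = (__be32 *)buf;
	int i, err;

	for (i = 0; i < 4; i++) {
		err = tg3_nvram_read_be32(tp, 0x10 + i * 4, &p[i]);
		if (err)
			return err;
	}
	return 0;
}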
2574 /* tp->lock is held. */
2575 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2577 u32 addr_high, addr_low;
2580 addr_high = ((tp->dev->dev_addr[0] << 8) |
2581 tp->dev->dev_addr[1]);
2582 addr_low = ((tp->dev->dev_addr[2] << 24) |
2583 (tp->dev->dev_addr[3] << 16) |
2584 (tp->dev->dev_addr[4] << 8) |
2585 (tp->dev->dev_addr[5] << 0));
2586 for (i = 0; i < 4; i++) {
2587 if (i == 1 && skip_mac_1)
2589 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2590 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2595 for (i = 0; i < 12; i++) {
2596 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2597 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2601 addr_high = (tp->dev->dev_addr[0] +
2602 tp->dev->dev_addr[1] +
2603 tp->dev->dev_addr[2] +
2604 tp->dev->dev_addr[3] +
2605 tp->dev->dev_addr[4] +
2606 tp->dev->dev_addr[5]) &
2607 TX_BACKOFF_SEED_MASK;
2608 tw32(MAC_TX_BACKOFF_SEED, addr_high);
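/* Worked example (illustrative): for MAC address 00:10:18:aa:bb:cc,
 * addr_high == 0x00000010 holds bytes 0-1 and addr_low == 0x18aabbcc
 * holds bytes 2-5; the backoff seed is the plain byte sum (0x259
 * here) masked by TX_BACKOFF_SEED_MASK.
 */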
2611 static void tg3_enable_register_access(struct tg3 *tp)
2613 /*
2614  * Make sure register accesses (indirect or otherwise) will function
2615  * correctly.
2616  */
2617 pci_write_config_dword(tp->pdev,
2618 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2621 static int tg3_power_up(struct tg3 *tp)
2623 tg3_enable_register_access(tp);
2625 pci_set_power_state(tp->pdev, PCI_D0);
2627 /* Switch out of Vaux if it is a NIC */
2628 if (tg3_flag(tp, IS_NIC))
2629 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2634 static int tg3_power_down_prepare(struct tg3 *tp)
2637 bool device_should_wake, do_low_power;
2639 tg3_enable_register_access(tp);
2641 /* Restore the CLKREQ setting. */
2642 if (tg3_flag(tp, CLKREQ_BUG)) {
2645 pci_read_config_word(tp->pdev,
2646 tp->pcie_cap + PCI_EXP_LNKCTL,
2648 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2649 pci_write_config_word(tp->pdev,
2650 tp->pcie_cap + PCI_EXP_LNKCTL,
2654 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2655 tw32(TG3PCI_MISC_HOST_CTRL,
2656 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2658 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2659 tg3_flag(tp, WOL_ENABLE);
2661 if (tg3_flag(tp, USE_PHYLIB)) {
2662 do_low_power = false;
2663 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2664 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2665 struct phy_device *phydev;
2666 u32 phyid, advertising;
2668 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2670 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2672 tp->link_config.orig_speed = phydev->speed;
2673 tp->link_config.orig_duplex = phydev->duplex;
2674 tp->link_config.orig_autoneg = phydev->autoneg;
2675 tp->link_config.orig_advertising = phydev->advertising;
2677 advertising = ADVERTISED_TP |
2679 ADVERTISED_Autoneg |
2680 ADVERTISED_10baseT_Half;
2682 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2683 if (tg3_flag(tp, WOL_SPEED_100MB))
2685 ADVERTISED_100baseT_Half |
2686 ADVERTISED_100baseT_Full |
2687 ADVERTISED_10baseT_Full;
2689 advertising |= ADVERTISED_10baseT_Full;
2692 phydev->advertising = advertising;
2694 phy_start_aneg(phydev);
2696 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2697 if (phyid != PHY_ID_BCMAC131) {
2698 phyid &= PHY_BCM_OUI_MASK;
2699 if (phyid == PHY_BCM_OUI_1 ||
2700 phyid == PHY_BCM_OUI_2 ||
2701 phyid == PHY_BCM_OUI_3)
2702 do_low_power = true;
2706 do_low_power = true;
2708 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2709 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2710 tp->link_config.orig_speed = tp->link_config.speed;
2711 tp->link_config.orig_duplex = tp->link_config.duplex;
2712 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2715 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2716 tp->link_config.speed = SPEED_10;
2717 tp->link_config.duplex = DUPLEX_HALF;
2718 tp->link_config.autoneg = AUTONEG_ENABLE;
2719 tg3_setup_phy(tp, 0);
2723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2726 val = tr32(GRC_VCPU_EXT_CTRL);
2727 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2728 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2732 for (i = 0; i < 200; i++) {
2733 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2734 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2739 if (tg3_flag(tp, WOL_CAP))
2740 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2741 WOL_DRV_STATE_SHUTDOWN |
2745 if (device_should_wake) {
2748 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2750 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2751 tg3_phy_auxctl_write(tp,
2752 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2753 MII_TG3_AUXCTL_PCTL_WOL_EN |
2754 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2755 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2759 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2760 mac_mode = MAC_MODE_PORT_MODE_GMII;
2762 mac_mode = MAC_MODE_PORT_MODE_MII;
2764 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2765 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2767 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2768 SPEED_100 : SPEED_10;
2769 if (tg3_5700_link_polarity(tp, speed))
2770 mac_mode |= MAC_MODE_LINK_POLARITY;
2772 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2775 mac_mode = MAC_MODE_PORT_MODE_TBI;
2778 if (!tg3_flag(tp, 5750_PLUS))
2779 tw32(MAC_LED_CTRL, tp->led_ctrl);
2781 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2782 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2783 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2784 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2786 if (tg3_flag(tp, ENABLE_APE))
2787 mac_mode |= MAC_MODE_APE_TX_EN |
2788 MAC_MODE_APE_RX_EN |
2789 MAC_MODE_TDE_ENABLE;
2791 tw32_f(MAC_MODE, mac_mode);
2794 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2798 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2799 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2800 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2803 base_val = tp->pci_clock_ctrl;
2804 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2805 CLOCK_CTRL_TXCLK_DISABLE);
2807 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2808 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2809 } else if (tg3_flag(tp, 5780_CLASS) ||
2810 tg3_flag(tp, CPMU_PRESENT) ||
2811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2813 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2814 u32 newbits1, newbits2;
2816 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2818 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2819 CLOCK_CTRL_TXCLK_DISABLE |
2821 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2822 } else if (tg3_flag(tp, 5705_PLUS)) {
2823 newbits1 = CLOCK_CTRL_625_CORE;
2824 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2826 newbits1 = CLOCK_CTRL_ALTCLK;
2827 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2830 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2833 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2836 if (!tg3_flag(tp, 5705_PLUS)) {
2839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2841 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2842 CLOCK_CTRL_TXCLK_DISABLE |
2843 CLOCK_CTRL_44MHZ_CORE);
2845 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2848 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2849 tp->pci_clock_ctrl | newbits3, 40);
2853 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2854 tg3_power_down_phy(tp, do_low_power);
2856 tg3_frob_aux_power(tp);
2858 /* Workaround for unstable PLL clock */
2859 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2860 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2861 u32 val = tr32(0x7d00);
2863 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2865 if (!tg3_flag(tp, ENABLE_ASF)) {
2868 err = tg3_nvram_lock(tp);
2869 tg3_halt_cpu(tp, RX_CPU_BASE);
2871 tg3_nvram_unlock(tp);
2875 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
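/* Recap of tg3_power_down_prepare() above (hedged summary): mask PCI
 * interrupts, drop the PHY into a low-power or WOL-capable state,
 * reprogram the MAC for magic-packet receive when a wake source is
 * armed, gate core clocks where the chip allows it, switch auxiliary
 * power via tg3_frob_aux_power(), and post the RESET_KIND_SHUTDOWN
 * signature for the firmware.
 */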
2880 static void tg3_power_down(struct tg3 *tp)
2882 tg3_power_down_prepare(tp);
2884 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2885 pci_set_power_state(tp->pdev, PCI_D3hot);
2888 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2890 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2891 case MII_TG3_AUX_STAT_10HALF:
2893 *duplex = DUPLEX_HALF;
2896 case MII_TG3_AUX_STAT_10FULL:
2898 *duplex = DUPLEX_FULL;
2901 case MII_TG3_AUX_STAT_100HALF:
2903 *duplex = DUPLEX_HALF;
2906 case MII_TG3_AUX_STAT_100FULL:
2908 *duplex = DUPLEX_FULL;
2911 case MII_TG3_AUX_STAT_1000HALF:
2912 *speed = SPEED_1000;
2913 *duplex = DUPLEX_HALF;
2916 case MII_TG3_AUX_STAT_1000FULL:
2917 *speed = SPEED_1000;
2918 *duplex = DUPLEX_FULL;
2922 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2923 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2925 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2929 *speed = SPEED_INVALID;
2930 *duplex = DUPLEX_INVALID;
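/* Example (illustrative): an aux_stat of MII_TG3_AUX_STAT_100FULL
 * decodes to SPEED_100 / DUPLEX_FULL.  FET-class PHYs take the
 * default path above and are decoded from the separate _100 and
 * _FULL bits; anything else reports SPEED_INVALID / DUPLEX_INVALID.
 */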
2935 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2940 new_adv = ADVERTISE_CSMA;
2941 if (advertise & ADVERTISED_10baseT_Half)
2942 new_adv |= ADVERTISE_10HALF;
2943 if (advertise & ADVERTISED_10baseT_Full)
2944 new_adv |= ADVERTISE_10FULL;
2945 if (advertise & ADVERTISED_100baseT_Half)
2946 new_adv |= ADVERTISE_100HALF;
2947 if (advertise & ADVERTISED_100baseT_Full)
2948 new_adv |= ADVERTISE_100FULL;
2950 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2952 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2956 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2960 if (advertise & ADVERTISED_1000baseT_Half)
2961 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2962 if (advertise & ADVERTISED_1000baseT_Full)
2963 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2965 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2966 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2967 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2968 MII_TG3_CTRL_ENABLE_AS_MASTER);
2970 err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2974 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2977 tw32(TG3_CPMU_EEE_MODE,
2978 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2980 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2984 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2986 case ASIC_REV_57765:
2987 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2988 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2989 MII_TG3_DSP_CH34TP2_HIBW01);
2992 val = MII_TG3_DSP_TAP26_ALNOKO |
2993 MII_TG3_DSP_TAP26_RMRXSTO |
2994 MII_TG3_DSP_TAP26_OPCSINPT;
2995 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2999 /* Advertise 100-BaseTX EEE ability */
3000 if (advertise & ADVERTISED_100baseT_Full)
3001 val |= MDIO_AN_EEE_ADV_100TX;
3002 /* Advertise 1000-BaseT EEE ability */
3003 if (advertise & ADVERTISED_1000baseT_Full)
3004 val |= MDIO_AN_EEE_ADV_1000T;
3005 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3007 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
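/* Worked example (illustrative): advertise == ADVERTISED_10baseT_Full
 * | ADVERTISED_100baseT_Full maps to MII_ADVERTISE == ADVERTISE_CSMA |
 * ADVERTISE_10FULL | ADVERTISE_100FULL plus the pause bits derived by
 * tg3_advert_flowctrl_1000T(); gigabit modes are programmed separately
 * through MII_TG3_CTRL.
 */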
3016 static void tg3_phy_copper_begin(struct tg3 *tp)
3021 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3022 new_adv = ADVERTISED_10baseT_Half |
3023 ADVERTISED_10baseT_Full;
3024 if (tg3_flag(tp, WOL_SPEED_100MB))
3025 new_adv |= ADVERTISED_100baseT_Half |
3026 ADVERTISED_100baseT_Full;
3028 tg3_phy_autoneg_cfg(tp, new_adv,
3029 FLOW_CTRL_TX | FLOW_CTRL_RX);
3030 } else if (tp->link_config.speed == SPEED_INVALID) {
3031 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3032 tp->link_config.advertising &=
3033 ~(ADVERTISED_1000baseT_Half |
3034 ADVERTISED_1000baseT_Full);
3036 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3037 tp->link_config.flowctrl);
3039 /* Asking for a specific link mode. */
3040 if (tp->link_config.speed == SPEED_1000) {
3041 if (tp->link_config.duplex == DUPLEX_FULL)
3042 new_adv = ADVERTISED_1000baseT_Full;
3044 new_adv = ADVERTISED_1000baseT_Half;
3045 } else if (tp->link_config.speed == SPEED_100) {
3046 if (tp->link_config.duplex == DUPLEX_FULL)
3047 new_adv = ADVERTISED_100baseT_Full;
3049 new_adv = ADVERTISED_100baseT_Half;
3051 if (tp->link_config.duplex == DUPLEX_FULL)
3052 new_adv = ADVERTISED_10baseT_Full;
3054 new_adv = ADVERTISED_10baseT_Half;
3057 tg3_phy_autoneg_cfg(tp, new_adv,
3058 tp->link_config.flowctrl);
3061 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3062 tp->link_config.speed != SPEED_INVALID) {
3063 u32 bmcr, orig_bmcr;
3065 tp->link_config.active_speed = tp->link_config.speed;
3066 tp->link_config.active_duplex = tp->link_config.duplex;
3069 switch (tp->link_config.speed) {
3075 bmcr |= BMCR_SPEED100;
3079 bmcr |= TG3_BMCR_SPEED1000;
3083 if (tp->link_config.duplex == DUPLEX_FULL)
3084 bmcr |= BMCR_FULLDPLX;
3086 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3087 (bmcr != orig_bmcr)) {
3088 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3089 for (i = 0; i < 1500; i++) {
3093 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3094 tg3_readphy(tp, MII_BMSR, &tmp))
3096 if (!(tmp & BMSR_LSTATUS)) {
3101 tg3_writephy(tp, MII_BMCR, bmcr);
3105 tg3_writephy(tp, MII_BMCR,
3106 BMCR_ANENABLE | BMCR_ANRESTART);
3110 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3114 /* Turn off tap power management and set the
3115  * Extended packet length bit. */
3116 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3118 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3119 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3120 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3121 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3122 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3129 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3131 u32 adv_reg, all_mask = 0;
3133 if (mask & ADVERTISED_10baseT_Half)
3134 all_mask |= ADVERTISE_10HALF;
3135 if (mask & ADVERTISED_10baseT_Full)
3136 all_mask |= ADVERTISE_10FULL;
3137 if (mask & ADVERTISED_100baseT_Half)
3138 all_mask |= ADVERTISE_100HALF;
3139 if (mask & ADVERTISED_100baseT_Full)
3140 all_mask |= ADVERTISE_100FULL;
3142 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3145 if ((adv_reg & all_mask) != all_mask)
3147 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3151 if (mask & ADVERTISED_1000baseT_Half)
3152 all_mask |= ADVERTISE_1000HALF;
3153 if (mask & ADVERTISED_1000baseT_Full)
3154 all_mask |= ADVERTISE_1000FULL;
3156 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3159 if ((tg3_ctrl & all_mask) != all_mask)
3165 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3169 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3172 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3173 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3175 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3176 if (curadv != reqadv)
3179 if (tg3_flag(tp, PAUSE_AUTONEG))
3180 tg3_readphy(tp, MII_LPA, rmtadv);
3182 /* Reprogram the advertisement register, even if it
3183 * does not affect the current link. If the link
3184 * gets renegotiated in the future, we can save an
3185 * additional renegotiation cycle by advertising
3186  * it correctly in the first place.
3187  */
3188 if (curadv != reqadv) {
3189 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3190 ADVERTISE_PAUSE_ASYM);
3191 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3198 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3200 int current_link_up;
3202 u32 lcl_adv, rmt_adv;
3210 (MAC_STATUS_SYNC_CHANGED |
3211 MAC_STATUS_CFG_CHANGED |
3212 MAC_STATUS_MI_COMPLETION |
3213 MAC_STATUS_LNKSTATE_CHANGED));
3216 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3218 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3222 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3224 /* Some third-party PHYs need to be reset on link going
3225  * down.
3226  */
3227 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3230 netif_carrier_ok(tp->dev)) {
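/* The link-status bit in BMSR latches low, so the register is read
 * twice: the first read flushes the latched value, the second
 * reflects the current link state.
 */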
3231 tg3_readphy(tp, MII_BMSR, &bmsr);
3232 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3233 !(bmsr & BMSR_LSTATUS))
3239 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3240 tg3_readphy(tp, MII_BMSR, &bmsr);
3241 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3242 !tg3_flag(tp, INIT_COMPLETE))
3245 if (!(bmsr & BMSR_LSTATUS)) {
3246 err = tg3_init_5401phy_dsp(tp);
3250 tg3_readphy(tp, MII_BMSR, &bmsr);
3251 for (i = 0; i < 1000; i++) {
3253 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3254 (bmsr & BMSR_LSTATUS)) {
3260 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3261 TG3_PHY_REV_BCM5401_B0 &&
3262 !(bmsr & BMSR_LSTATUS) &&
3263 tp->link_config.active_speed == SPEED_1000) {
3264 err = tg3_phy_reset(tp);
3266 err = tg3_init_5401phy_dsp(tp);
3271 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3272 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3273 /* 5701 {A0,B0} CRC bug workaround */
3274 tg3_writephy(tp, 0x15, 0x0a75);
3275 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3276 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3277 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3280 /* Clear pending interrupts... */
3281 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3284 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3285 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3286 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3287 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3291 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3292 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3293 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3295 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3298 current_link_up = 0;
3299 current_speed = SPEED_INVALID;
3300 current_duplex = DUPLEX_INVALID;
3302 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3303 err = tg3_phy_auxctl_read(tp,
3304 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3306 if (!err && !(val & (1 << 10))) {
3307 tg3_phy_auxctl_write(tp,
3308 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3315 for (i = 0; i < 100; i++) {
3316 tg3_readphy(tp, MII_BMSR, &bmsr);
3317 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3318 (bmsr & BMSR_LSTATUS))
3323 if (bmsr & BMSR_LSTATUS) {
3326 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3327 for (i = 0; i < 2000; i++) {
3329 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3334 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3339 for (i = 0; i < 200; i++) {
3340 tg3_readphy(tp, MII_BMCR, &bmcr);
3341 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3343 if (bmcr && bmcr != 0x7fff)
3351 tp->link_config.active_speed = current_speed;
3352 tp->link_config.active_duplex = current_duplex;
3354 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3355 if ((bmcr & BMCR_ANENABLE) &&
3356 tg3_copper_is_advertising_all(tp,
3357 tp->link_config.advertising)) {
3358 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3360 current_link_up = 1;
3363 if (!(bmcr & BMCR_ANENABLE) &&
3364 tp->link_config.speed == current_speed &&
3365 tp->link_config.duplex == current_duplex &&
3366 tp->link_config.flowctrl ==
3367 tp->link_config.active_flowctrl) {
3368 current_link_up = 1;
3372 if (current_link_up == 1 &&
3373 tp->link_config.active_duplex == DUPLEX_FULL)
3374 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3378 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3379 tg3_phy_copper_begin(tp);
3381 tg3_readphy(tp, MII_BMSR, &bmsr);
3382 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3383 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3384 current_link_up = 1;
3387 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3388 if (current_link_up == 1) {
3389 if (tp->link_config.active_speed == SPEED_100 ||
3390 tp->link_config.active_speed == SPEED_10)
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3393 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3394 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3395 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3397 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3399 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3400 if (tp->link_config.active_duplex == DUPLEX_HALF)
3401 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3404 if (current_link_up == 1 &&
3405 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3406 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3408 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3411 /* ??? Without this setting Netgear GA302T PHY does not
3412  * ??? send/receive packets...
3413  */
3414 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3415 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3416 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3417 tw32_f(MAC_MI_MODE, tp->mi_mode);
3421 tw32_f(MAC_MODE, tp->mac_mode);
3424 tg3_phy_eee_adjust(tp, current_link_up);
3426 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3427 /* Polled via timer. */
3428 tw32_f(MAC_EVENT, 0);
3430 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3435 current_link_up == 1 &&
3436 tp->link_config.active_speed == SPEED_1000 &&
3437 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3440 (MAC_STATUS_SYNC_CHANGED |
3441 MAC_STATUS_CFG_CHANGED));
3444 NIC_SRAM_FIRMWARE_MBOX,
3445 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3448 /* Prevent send BD corruption. */
3449 if (tg3_flag(tp, CLKREQ_BUG)) {
3450 u16 oldlnkctl, newlnkctl;
3452 pci_read_config_word(tp->pdev,
3453 tp->pcie_cap + PCI_EXP_LNKCTL,
3455 if (tp->link_config.active_speed == SPEED_100 ||
3456 tp->link_config.active_speed == SPEED_10)
3457 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3459 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3460 if (newlnkctl != oldlnkctl)
3461 pci_write_config_word(tp->pdev,
3462 tp->pcie_cap + PCI_EXP_LNKCTL,
3466 if (current_link_up != netif_carrier_ok(tp->dev)) {
3467 if (current_link_up)
3468 netif_carrier_on(tp->dev);
3470 netif_carrier_off(tp->dev);
3471 tg3_link_report(tp);
3477 struct tg3_fiber_aneginfo {
3479 #define ANEG_STATE_UNKNOWN 0
3480 #define ANEG_STATE_AN_ENABLE 1
3481 #define ANEG_STATE_RESTART_INIT 2
3482 #define ANEG_STATE_RESTART 3
3483 #define ANEG_STATE_DISABLE_LINK_OK 4
3484 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3485 #define ANEG_STATE_ABILITY_DETECT 6
3486 #define ANEG_STATE_ACK_DETECT_INIT 7
3487 #define ANEG_STATE_ACK_DETECT 8
3488 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3489 #define ANEG_STATE_COMPLETE_ACK 10
3490 #define ANEG_STATE_IDLE_DETECT_INIT 11
3491 #define ANEG_STATE_IDLE_DETECT 12
3492 #define ANEG_STATE_LINK_OK 13
3493 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3494 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3497 #define MR_AN_ENABLE 0x00000001
3498 #define MR_RESTART_AN 0x00000002
3499 #define MR_AN_COMPLETE 0x00000004
3500 #define MR_PAGE_RX 0x00000008
3501 #define MR_NP_LOADED 0x00000010
3502 #define MR_TOGGLE_TX 0x00000020
3503 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3504 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3505 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3506 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3507 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3508 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3509 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3510 #define MR_TOGGLE_RX 0x00002000
3511 #define MR_NP_RX 0x00004000
3513 #define MR_LINK_OK 0x80000000
3515 unsigned long link_time, cur_time;
3517 u32 ability_match_cfg;
3518 int ability_match_count;
3520 char ability_match, idle_match, ack_match;
3522 u32 txconfig, rxconfig;
3523 #define ANEG_CFG_NP 0x00000080
3524 #define ANEG_CFG_ACK 0x00000040
3525 #define ANEG_CFG_RF2 0x00000020
3526 #define ANEG_CFG_RF1 0x00000010
3527 #define ANEG_CFG_PS2 0x00000001
3528 #define ANEG_CFG_PS1 0x00008000
3529 #define ANEG_CFG_HD 0x00004000
3530 #define ANEG_CFG_FD 0x00002000
3531 #define ANEG_CFG_INVAL 0x00001f06
3536 #define ANEG_TIMER_ENAB 2
3537 #define ANEG_FAILED -1
3539 #define ANEG_STATE_SETTLE_TIME 10000
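/* Typical successful progression through the state machine below
 * (illustrative): AN_ENABLE -> RESTART_INIT -> RESTART ->
 * ABILITY_DETECT_INIT -> ABILITY_DETECT -> ACK_DETECT_INIT ->
 * ACK_DETECT -> COMPLETE_ACK_INIT -> COMPLETE_ACK ->
 * IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK.
 */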
3541 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3542 struct tg3_fiber_aneginfo *ap)
3545 unsigned long delta;
3549 if (ap->state == ANEG_STATE_UNKNOWN) {
3553 ap->ability_match_cfg = 0;
3554 ap->ability_match_count = 0;
3555 ap->ability_match = 0;
3561 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3562 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3564 if (rx_cfg_reg != ap->ability_match_cfg) {
3565 ap->ability_match_cfg = rx_cfg_reg;
3566 ap->ability_match = 0;
3567 ap->ability_match_count = 0;
3569 if (++ap->ability_match_count > 1) {
3570 ap->ability_match = 1;
3571 ap->ability_match_cfg = rx_cfg_reg;
3574 if (rx_cfg_reg & ANEG_CFG_ACK)
3582 ap->ability_match_cfg = 0;
3583 ap->ability_match_count = 0;
3584 ap->ability_match = 0;
3590 ap->rxconfig = rx_cfg_reg;
3593 switch (ap->state) {
3594 case ANEG_STATE_UNKNOWN:
3595 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3596 ap->state = ANEG_STATE_AN_ENABLE;
3599 case ANEG_STATE_AN_ENABLE:
3600 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3601 if (ap->flags & MR_AN_ENABLE) {
3604 ap->ability_match_cfg = 0;
3605 ap->ability_match_count = 0;
3606 ap->ability_match = 0;
3610 ap->state = ANEG_STATE_RESTART_INIT;
3612 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3616 case ANEG_STATE_RESTART_INIT:
3617 ap->link_time = ap->cur_time;
3618 ap->flags &= ~(MR_NP_LOADED);
3620 tw32(MAC_TX_AUTO_NEG, 0);
3621 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3622 tw32_f(MAC_MODE, tp->mac_mode);
3625 ret = ANEG_TIMER_ENAB;
3626 ap->state = ANEG_STATE_RESTART;
3629 case ANEG_STATE_RESTART:
3630 delta = ap->cur_time - ap->link_time;
3631 if (delta > ANEG_STATE_SETTLE_TIME)
3632 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3634 ret = ANEG_TIMER_ENAB;
3637 case ANEG_STATE_DISABLE_LINK_OK:
3641 case ANEG_STATE_ABILITY_DETECT_INIT:
3642 ap->flags &= ~(MR_TOGGLE_TX);
3643 ap->txconfig = ANEG_CFG_FD;
3644 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3645 if (flowctrl & ADVERTISE_1000XPAUSE)
3646 ap->txconfig |= ANEG_CFG_PS1;
3647 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3648 ap->txconfig |= ANEG_CFG_PS2;
3649 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3650 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3651 tw32_f(MAC_MODE, tp->mac_mode);
3654 ap->state = ANEG_STATE_ABILITY_DETECT;
3657 case ANEG_STATE_ABILITY_DETECT:
3658 if (ap->ability_match != 0 && ap->rxconfig != 0)
3659 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3662 case ANEG_STATE_ACK_DETECT_INIT:
3663 ap->txconfig |= ANEG_CFG_ACK;
3664 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3665 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3666 tw32_f(MAC_MODE, tp->mac_mode);
3669 ap->state = ANEG_STATE_ACK_DETECT;
3672 case ANEG_STATE_ACK_DETECT:
3673 if (ap->ack_match != 0) {
3674 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3675 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3676 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3678 ap->state = ANEG_STATE_AN_ENABLE;
3680 } else if (ap->ability_match != 0 &&
3681 ap->rxconfig == 0) {
3682 ap->state = ANEG_STATE_AN_ENABLE;
3686 case ANEG_STATE_COMPLETE_ACK_INIT:
3687 if (ap->rxconfig & ANEG_CFG_INVAL) {
3691 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3692 MR_LP_ADV_HALF_DUPLEX |
3693 MR_LP_ADV_SYM_PAUSE |
3694 MR_LP_ADV_ASYM_PAUSE |
3695 MR_LP_ADV_REMOTE_FAULT1 |
3696 MR_LP_ADV_REMOTE_FAULT2 |
3697 MR_LP_ADV_NEXT_PAGE |
3700 if (ap->rxconfig & ANEG_CFG_FD)
3701 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3702 if (ap->rxconfig & ANEG_CFG_HD)
3703 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3704 if (ap->rxconfig & ANEG_CFG_PS1)
3705 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3706 if (ap->rxconfig & ANEG_CFG_PS2)
3707 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3708 if (ap->rxconfig & ANEG_CFG_RF1)
3709 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3710 if (ap->rxconfig & ANEG_CFG_RF2)
3711 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3712 if (ap->rxconfig & ANEG_CFG_NP)
3713 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3715 ap->link_time = ap->cur_time;
3717 ap->flags ^= (MR_TOGGLE_TX);
3718 if (ap->rxconfig & 0x0008)
3719 ap->flags |= MR_TOGGLE_RX;
3720 if (ap->rxconfig & ANEG_CFG_NP)
3721 ap->flags |= MR_NP_RX;
3722 ap->flags |= MR_PAGE_RX;
3724 ap->state = ANEG_STATE_COMPLETE_ACK;
3725 ret = ANEG_TIMER_ENAB;
3728 case ANEG_STATE_COMPLETE_ACK:
3729 if (ap->ability_match != 0 &&
3730 ap->rxconfig == 0) {
3731 ap->state = ANEG_STATE_AN_ENABLE;
3734 delta = ap->cur_time - ap->link_time;
3735 if (delta > ANEG_STATE_SETTLE_TIME) {
3736 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3737 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3739 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3740 !(ap->flags & MR_NP_RX)) {
3741 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3749 case ANEG_STATE_IDLE_DETECT_INIT:
3750 ap->link_time = ap->cur_time;
3751 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3752 tw32_f(MAC_MODE, tp->mac_mode);
3755 ap->state = ANEG_STATE_IDLE_DETECT;
3756 ret = ANEG_TIMER_ENAB;
3759 case ANEG_STATE_IDLE_DETECT:
3760 if (ap->ability_match != 0 &&
3761 ap->rxconfig == 0) {
3762 ap->state = ANEG_STATE_AN_ENABLE;
3765 delta = ap->cur_time - ap->link_time;
3766 if (delta > ANEG_STATE_SETTLE_TIME) {
3767 /* XXX another gem from the Broadcom driver :( */
3768 ap->state = ANEG_STATE_LINK_OK;
3772 case ANEG_STATE_LINK_OK:
3773 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3777 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3778 /* ??? unimplemented */
3781 case ANEG_STATE_NEXT_PAGE_WAIT:
3782 /* ??? unimplemented */
3793 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3796 struct tg3_fiber_aneginfo aninfo;
3797 int status = ANEG_FAILED;
3801 tw32_f(MAC_TX_AUTO_NEG, 0);
3803 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3804 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3807 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3810 memset(&aninfo, 0, sizeof(aninfo));
3811 aninfo.flags |= MR_AN_ENABLE;
3812 aninfo.state = ANEG_STATE_UNKNOWN;
3813 aninfo.cur_time = 0;
3815 while (++tick < 195000) {
3816 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3817 if (status == ANEG_DONE || status == ANEG_FAILED)
3823 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3824 tw32_f(MAC_MODE, tp->mac_mode);
3827 *txflags = aninfo.txconfig;
3828 *rxflags = aninfo.flags;
3830 if (status == ANEG_DONE &&
3831 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3832 MR_LP_ADV_FULL_DUPLEX)))
3838 static void tg3_init_bcm8002(struct tg3 *tp)
3840 u32 mac_status = tr32(MAC_STATUS);
3843 /* Reset on first-time init, or whenever we have a link. */
3844 if (tg3_flag(tp, INIT_COMPLETE) &&
3845 !(mac_status & MAC_STATUS_PCS_SYNCED))
3848 /* Set PLL lock range. */
3849 tg3_writephy(tp, 0x16, 0x8007);
3852 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3854 /* Wait for reset to complete. */
3855 /* XXX schedule_timeout() ... */
3856 for (i = 0; i < 500; i++)
3859 /* Config mode; select PMA/Ch 1 regs. */
3860 tg3_writephy(tp, 0x10, 0x8411);
3862 /* Enable auto-lock and comdet, select txclk for tx. */
3863 tg3_writephy(tp, 0x11, 0x0a10);
3865 tg3_writephy(tp, 0x18, 0x00a0);
3866 tg3_writephy(tp, 0x16, 0x41ff);
3868 /* Assert and deassert POR. */
3869 tg3_writephy(tp, 0x13, 0x0400);
3871 tg3_writephy(tp, 0x13, 0x0000);
3873 tg3_writephy(tp, 0x11, 0x0a50);
3875 tg3_writephy(tp, 0x11, 0x0a10);
3877 /* Wait for signal to stabilize */
3878 /* XXX schedule_timeout() ... */
3879 for (i = 0; i < 15000; i++)
3882 /* Deselect the channel register so we can read the PHYID
3883  * later.
3884  */
3885 tg3_writephy(tp, 0x10, 0x8011);
3888 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3891 u32 sg_dig_ctrl, sg_dig_status;
3892 u32 serdes_cfg, expected_sg_dig_ctrl;
3893 int workaround, port_a;
3894 int current_link_up;
3897 expected_sg_dig_ctrl = 0;
3900 current_link_up = 0;
3902 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3903 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3905 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3908 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3909 /* preserve bits 20-23 for voltage regulator */
3910 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3913 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3915 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3916 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3918 u32 val = serdes_cfg;
3924 tw32_f(MAC_SERDES_CFG, val);
3927 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3929 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3930 tg3_setup_flow_control(tp, 0, 0);
3931 current_link_up = 1;
3936 /* Want auto-negotiation. */
3937 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3939 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3940 if (flowctrl & ADVERTISE_1000XPAUSE)
3941 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3942 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3943 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3945 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3946 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3947 tp->serdes_counter &&
3948 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3949 MAC_STATUS_RCVD_CFG)) ==
3950 MAC_STATUS_PCS_SYNCED)) {
3951 tp->serdes_counter--;
3952 current_link_up = 1;
3957 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3958 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3960 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3962 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3963 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3964 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3965 MAC_STATUS_SIGNAL_DET)) {
3966 sg_dig_status = tr32(SG_DIG_STATUS);
3967 mac_status = tr32(MAC_STATUS);
3969 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3970 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3971 u32 local_adv = 0, remote_adv = 0;
3973 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3974 local_adv |= ADVERTISE_1000XPAUSE;
3975 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3976 local_adv |= ADVERTISE_1000XPSE_ASYM;
3978 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3979 remote_adv |= LPA_1000XPAUSE;
3980 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3981 remote_adv |= LPA_1000XPAUSE_ASYM;
3983 tg3_setup_flow_control(tp, local_adv, remote_adv);
3984 current_link_up = 1;
3985 tp->serdes_counter = 0;
3986 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3987 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3988 if (tp->serdes_counter)
3989 tp->serdes_counter--;
3992 u32 val = serdes_cfg;
3999 tw32_f(MAC_SERDES_CFG, val);
4002 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4005 /* Link parallel detection - link is up only
4006  * if we have PCS_SYNC and are not receiving
4007  * config code words. */
4008 mac_status = tr32(MAC_STATUS);
4009 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4010 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4011 tg3_setup_flow_control(tp, 0, 0);
4012 current_link_up = 1;
4014 TG3_PHYFLG_PARALLEL_DETECT;
4015 tp->serdes_counter =
4016 SERDES_PARALLEL_DET_TIMEOUT;
4018 goto restart_autoneg;
4022 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4023 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4027 return current_link_up;
4030 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4032 int current_link_up = 0;
4034 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4037 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4038 u32 txflags, rxflags;
4041 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4042 u32 local_adv = 0, remote_adv = 0;
4044 if (txflags & ANEG_CFG_PS1)
4045 local_adv |= ADVERTISE_1000XPAUSE;
4046 if (txflags & ANEG_CFG_PS2)
4047 local_adv |= ADVERTISE_1000XPSE_ASYM;
4049 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4050 remote_adv |= LPA_1000XPAUSE;
4051 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4052 remote_adv |= LPA_1000XPAUSE_ASYM;
4054 tg3_setup_flow_control(tp, local_adv, remote_adv);
4056 current_link_up = 1;
4058 for (i = 0; i < 30; i++) {
4061 (MAC_STATUS_SYNC_CHANGED |
4062 MAC_STATUS_CFG_CHANGED));
4064 if ((tr32(MAC_STATUS) &
4065 (MAC_STATUS_SYNC_CHANGED |
4066 MAC_STATUS_CFG_CHANGED)) == 0)
4070 mac_status = tr32(MAC_STATUS);
4071 if (current_link_up == 0 &&
4072 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4073 !(mac_status & MAC_STATUS_RCVD_CFG))
4074 current_link_up = 1;
4076 tg3_setup_flow_control(tp, 0, 0);
4078 /* Forcing 1000FD link up. */
4079 current_link_up = 1;
4081 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4084 tw32_f(MAC_MODE, tp->mac_mode);
4089 return current_link_up;
4092 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4095 u16 orig_active_speed;
4096 u8 orig_active_duplex;
4098 int current_link_up;
4101 orig_pause_cfg = tp->link_config.active_flowctrl;
4102 orig_active_speed = tp->link_config.active_speed;
4103 orig_active_duplex = tp->link_config.active_duplex;
4105 if (!tg3_flag(tp, HW_AUTONEG) &&
4106 netif_carrier_ok(tp->dev) &&
4107 tg3_flag(tp, INIT_COMPLETE)) {
4108 mac_status = tr32(MAC_STATUS);
4109 mac_status &= (MAC_STATUS_PCS_SYNCED |
4110 MAC_STATUS_SIGNAL_DET |
4111 MAC_STATUS_CFG_CHANGED |
4112 MAC_STATUS_RCVD_CFG);
4113 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4114 MAC_STATUS_SIGNAL_DET)) {
4115 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4116 MAC_STATUS_CFG_CHANGED));
4121 tw32_f(MAC_TX_AUTO_NEG, 0);
4123 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4124 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4125 tw32_f(MAC_MODE, tp->mac_mode);
4128 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4129 tg3_init_bcm8002(tp);
4131 /* Enable link change event even when serdes polling. */
4132 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4135 current_link_up = 0;
4136 mac_status = tr32(MAC_STATUS);
4138 if (tg3_flag(tp, HW_AUTONEG))
4139 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4141 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4143 tp->napi[0].hw_status->status =
4144 (SD_STATUS_UPDATED |
4145 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4147 for (i = 0; i < 100; i++) {
4148 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4149 MAC_STATUS_CFG_CHANGED));
4151 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4152 MAC_STATUS_CFG_CHANGED |
4153 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4157 mac_status = tr32(MAC_STATUS);
4158 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4159 current_link_up = 0;
4160 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4161 tp->serdes_counter == 0) {
4162 tw32_f(MAC_MODE, (tp->mac_mode |
4163 MAC_MODE_SEND_CONFIGS));
4165 tw32_f(MAC_MODE, tp->mac_mode);
4169 if (current_link_up == 1) {
4170 tp->link_config.active_speed = SPEED_1000;
4171 tp->link_config.active_duplex = DUPLEX_FULL;
4172 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173 LED_CTRL_LNKLED_OVERRIDE |
4174 LED_CTRL_1000MBPS_ON));
4176 tp->link_config.active_speed = SPEED_INVALID;
4177 tp->link_config.active_duplex = DUPLEX_INVALID;
4178 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4179 LED_CTRL_LNKLED_OVERRIDE |
4180 LED_CTRL_TRAFFIC_OVERRIDE));
4183 if (current_link_up != netif_carrier_ok(tp->dev)) {
4184 if (current_link_up)
4185 netif_carrier_on(tp->dev);
4187 netif_carrier_off(tp->dev);
4188 tg3_link_report(tp);
4190 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4191 if (orig_pause_cfg != now_pause_cfg ||
4192 orig_active_speed != tp->link_config.active_speed ||
4193 orig_active_duplex != tp->link_config.active_duplex)
4194 tg3_link_report(tp);
4200 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4202 int current_link_up, err = 0;
4206 u32 local_adv, remote_adv;
4208 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4209 tw32_f(MAC_MODE, tp->mac_mode);
4215 (MAC_STATUS_SYNC_CHANGED |
4216 MAC_STATUS_CFG_CHANGED |
4217 MAC_STATUS_MI_COMPLETION |
4218 MAC_STATUS_LNKSTATE_CHANGED));
4224 current_link_up = 0;
4225 current_speed = SPEED_INVALID;
4226 current_duplex = DUPLEX_INVALID;
4228 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
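/* BMSR is read twice because its link bit latches low; on 5714-class
 * serdes parts the MAC TX status below is treated as the
 * authoritative link indication instead.
 */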
4230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4231 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4232 bmsr |= BMSR_LSTATUS;
4234 bmsr &= ~BMSR_LSTATUS;
4237 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4239 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4240 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4241 /* do nothing, just check for link up at the end */
4242 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4245 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4246 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4247 ADVERTISE_1000XPAUSE |
4248 ADVERTISE_1000XPSE_ASYM |
4251 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4253 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4254 new_adv |= ADVERTISE_1000XHALF;
4255 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4256 new_adv |= ADVERTISE_1000XFULL;
4258 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4259 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4260 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4261 tg3_writephy(tp, MII_BMCR, bmcr);
4263 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4264 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4265 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4272 bmcr &= ~BMCR_SPEED1000;
4273 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4275 if (tp->link_config.duplex == DUPLEX_FULL)
4276 new_bmcr |= BMCR_FULLDPLX;
4278 if (new_bmcr != bmcr) {
4279 /* BMCR_SPEED1000 is a reserved bit that needs
4280  * to be set on write.
4281  */
4282 new_bmcr |= BMCR_SPEED1000;
4284 /* Force a linkdown */
4285 if (netif_carrier_ok(tp->dev)) {
4288 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4289 adv &= ~(ADVERTISE_1000XFULL |
4290 ADVERTISE_1000XHALF |
4292 tg3_writephy(tp, MII_ADVERTISE, adv);
4293 tg3_writephy(tp, MII_BMCR, bmcr |
4297 netif_carrier_off(tp->dev);
4299 tg3_writephy(tp, MII_BMCR, new_bmcr);
4301 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4303 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4305 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4306 bmsr |= BMSR_LSTATUS;
4308 bmsr &= ~BMSR_LSTATUS;
4310 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4314 if (bmsr & BMSR_LSTATUS) {
4315 current_speed = SPEED_1000;
4316 current_link_up = 1;
4317 if (bmcr & BMCR_FULLDPLX)
4318 current_duplex = DUPLEX_FULL;
4320 current_duplex = DUPLEX_HALF;
4325 if (bmcr & BMCR_ANENABLE) {
4328 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4329 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4330 common = local_adv & remote_adv;
4331 if (common & (ADVERTISE_1000XHALF |
4332 ADVERTISE_1000XFULL)) {
4333 if (common & ADVERTISE_1000XFULL)
4334 current_duplex = DUPLEX_FULL;
4336 current_duplex = DUPLEX_HALF;
4337 } else if (!tg3_flag(tp, 5780_CLASS)) {
4338 /* Link is up via parallel detect */
4340 current_link_up = 0;
4345 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4346 tg3_setup_flow_control(tp, local_adv, remote_adv);
4348 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4349 if (tp->link_config.active_duplex == DUPLEX_HALF)
4350 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4352 tw32_f(MAC_MODE, tp->mac_mode);
4355 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4357 tp->link_config.active_speed = current_speed;
4358 tp->link_config.active_duplex = current_duplex;
4360 if (current_link_up != netif_carrier_ok(tp->dev)) {
4361 if (current_link_up)
4362 netif_carrier_on(tp->dev);
4364 netif_carrier_off(tp->dev);
4365 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4367 tg3_link_report(tp);
4372 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4374 if (tp->serdes_counter) {
4375 /* Give autoneg time to complete. */
4376 tp->serdes_counter--;
4380 if (!netif_carrier_ok(tp->dev) &&
4381 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4384 tg3_readphy(tp, MII_BMCR, &bmcr);
4385 if (bmcr & BMCR_ANENABLE) {
4388 /* Select shadow register 0x1f */
4389 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4390 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4392 /* Select expansion interrupt status register */
4393 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4394 MII_TG3_DSP_EXP1_INT_STAT);
4395 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4398 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4399 /* We have signal detect and not receiving
4400  * config code words, link is up by parallel
4401  * detection.
4402  */
4404 bmcr &= ~BMCR_ANENABLE;
4405 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4406 tg3_writephy(tp, MII_BMCR, bmcr);
4407 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4410 } else if (netif_carrier_ok(tp->dev) &&
4411 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4412 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4415 /* Select expansion interrupt status register */
4416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4417 MII_TG3_DSP_EXP1_INT_STAT);
4418 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4422 /* Config code words received, turn on autoneg. */
4423 tg3_readphy(tp, MII_BMCR, &bmcr);
4424 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4426 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4432 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4437 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4438 err = tg3_setup_fiber_phy(tp, force_reset);
4439 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4440 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4442 err = tg3_setup_copper_phy(tp, force_reset);
4444 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4447 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4448 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4450 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4455 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4456 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4457 tw32(GRC_MISC_CFG, val);
4460 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4461 (6 << TX_LENGTHS_IPG_SHIFT);
4462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4463 val |= tr32(MAC_TX_LENGTHS) &
4464 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4465 TX_LENGTHS_CNT_DWN_VAL_MSK);
4467 if (tp->link_config.active_speed == SPEED_1000 &&
4468 tp->link_config.active_duplex == DUPLEX_HALF)
4469 tw32(MAC_TX_LENGTHS, val |
4470 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4472 tw32(MAC_TX_LENGTHS, val |
4473 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4475 if (!tg3_flag(tp, 5705_PLUS)) {
4476 if (netif_carrier_ok(tp->dev)) {
4477 tw32(HOSTCC_STAT_COAL_TICKS,
4478 tp->coal.stats_block_coalesce_usecs);
4480 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4484 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4485 val = tr32(PCIE_PWR_MGMT_THRESH);
4486 if (!netif_carrier_ok(tp->dev))
4487 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4490 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4491 tw32(PCIE_PWR_MGMT_THRESH, val);
4497 static inline int tg3_irq_sync(struct tg3 *tp)
4499 return tp->irq_sync;
4502 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4506 dst = (u32 *)((u8 *)dst + off);
4507 for (i = 0; i < len; i += sizeof(u32))
4508 *dst++ = tr32(off + i);
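/* Example (illustrative): tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0)
 * copies registers MAC_MODE .. MAC_MODE + 0x4ec into regs[] at their
 * native byte offsets, so the dump buffer mirrors the chip's register
 * map and all-zero rows can simply be skipped when printing.
 */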
4511 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4513 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4514 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4515 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4516 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4517 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4518 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4519 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4520 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4523 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4524 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4525 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4526 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4527 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4528 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4529 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4530 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4531 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4533 if (tg3_flag(tp, SUPPORT_MSIX))
4534 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4536 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4537 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4538 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4539 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4540 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4543 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4545 if (!tg3_flag(tp, 5705_PLUS)) {
4546 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4547 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4548 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4551 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4552 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4553 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4554 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4555 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4557 if (tg3_flag(tp, NVRAM))
4558 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4561 static void tg3_dump_state(struct tg3 *tp)
4566 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4568 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4572 if (tg3_flag(tp, PCI_EXPRESS)) {
4573 /* Read up to but not including private PCI registers */
4574 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4575 regs[i / sizeof(u32)] = tr32(i);
4577 tg3_dump_legacy_regs(tp, regs);
4579 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4580 if (!regs[i + 0] && !regs[i + 1] &&
4581 !regs[i + 2] && !regs[i + 3])
4584 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4586 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4591 for (i = 0; i < tp->irq_cnt; i++) {
4592 struct tg3_napi *tnapi = &tp->napi[i];
4594 /* SW status block */
4596 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4598 tnapi->hw_status->status,
4599 tnapi->hw_status->status_tag,
4600 tnapi->hw_status->rx_jumbo_consumer,
4601 tnapi->hw_status->rx_consumer,
4602 tnapi->hw_status->rx_mini_consumer,
4603 tnapi->hw_status->idx[0].rx_producer,
4604 tnapi->hw_status->idx[0].tx_consumer);
4607 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4609 tnapi->last_tag, tnapi->last_irq_tag,
4610 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4612 tnapi->prodring.rx_std_prod_idx,
4613 tnapi->prodring.rx_std_cons_idx,
4614 tnapi->prodring.rx_jmb_prod_idx,
4615 tnapi->prodring.rx_jmb_cons_idx);
4619 /* This is called whenever we suspect that the system chipset is re-
4620 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4621 * is bogus tx completions. We try to recover by setting the
4622 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4623 * (by tg3_reset_task).
4625 static void tg3_tx_recover(struct tg3 *tp)
4627 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4628 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4630 netdev_warn(tp->dev,
4631 "The system may be re-ordering memory-mapped I/O "
4632 "cycles to the network device, attempting to recover. "
4633 "Please report the problem to the driver maintainer "
4634 "and include system chipset information.\n");
4636 spin_lock(&tp->lock);
4637 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4638 spin_unlock(&tp->lock);
4641 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4643 /* Tell compiler to fetch tx indices from memory. */
4644 barrier();
4645 return tnapi->tx_pending -
4646 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
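/* Editorial example, not from the original source: because
 * TG3_TX_RING_SIZE is a power of two, the mask arithmetic above stays
 * correct across index wrap-around.  A minimal sketch with a
 * hypothetical helper (tg3_tx_avail_example is not part of the driver):
 */
static inline void tg3_tx_avail_example(void)
{
	u32 prod = 3, cons = 510;	/* producer has wrapped past consumer */

	/* (3 - 510) underflows to 0xfffffe05; masking with 511 keeps only
	 * the low nine bits, i.e. the true in-flight count of 5, so a ring
	 * with tx_pending = 511 would report 506 free descriptors.
	 */
	WARN_ON(((prod - cons) & (TG3_TX_RING_SIZE - 1)) != 5);
}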
4649 /* Tigon3 never reports partial packet sends. So we do not
4650 * need special logic to handle SKBs that have not had all
4651 * of their frags sent yet, like SunGEM does.
4653 static void tg3_tx(struct tg3_napi *tnapi)
4655 struct tg3 *tp = tnapi->tp;
4656 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4657 u32 sw_idx = tnapi->tx_cons;
4658 struct netdev_queue *txq;
4659 int index = tnapi - tp->napi;
4661 if (tg3_flag(tp, ENABLE_TSS))
4664 txq = netdev_get_tx_queue(tp->dev, index);
4666 while (sw_idx != hw_idx) {
4667 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4668 struct sk_buff *skb = ri->skb;
4671 if (unlikely(skb == NULL)) {
4676 pci_unmap_single(tp->pdev,
4677 dma_unmap_addr(ri, mapping),
4683 sw_idx = NEXT_TX(sw_idx);
4685 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4686 ri = &tnapi->tx_buffers[sw_idx];
4687 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4690 pci_unmap_page(tp->pdev,
4691 dma_unmap_addr(ri, mapping),
4692 skb_shinfo(skb)->frags[i].size,
4694 sw_idx = NEXT_TX(sw_idx);
4699 if (unlikely(tx_bug)) {
4705 tnapi->tx_cons = sw_idx;
4707 /* Need to make the tx_cons update visible to tg3_start_xmit()
4708 * before checking for netif_queue_stopped(). Without the
4709 * memory barrier, there is a small possibility that tg3_start_xmit()
4710 * will miss it and cause the queue to be stopped forever.
4712 smp_mb();
4714 if (unlikely(netif_tx_queue_stopped(txq) &&
4715 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4716 __netif_tx_lock(txq, smp_processor_id());
4717 if (netif_tx_queue_stopped(txq) &&
4718 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4719 netif_tx_wake_queue(txq);
4720 __netif_tx_unlock(txq);
4724 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4729 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4730 map_sz, PCI_DMA_FROMDEVICE);
4731 dev_kfree_skb_any(ri->skb);
4735 /* Returns size of skb allocated or < 0 on error.
4737 * We only need to fill in the address because the other members
4738 * of the RX descriptor are invariant, see tg3_init_rings.
4740 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4741 * posting buffers we only dirty the first cache line of the RX
4742 * descriptor (containing the address). Whereas for the RX status
4743 * buffers the cpu only reads the last cacheline of the RX descriptor
4744 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4746 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4747 u32 opaque_key, u32 dest_idx_unmasked)
4749 struct tg3_rx_buffer_desc *desc;
4750 struct ring_info *map;
4751 struct sk_buff *skb;
4753 int skb_size, dest_idx;
4755 switch (opaque_key) {
4756 case RXD_OPAQUE_RING_STD:
4757 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4758 desc = &tpr->rx_std[dest_idx];
4759 map = &tpr->rx_std_buffers[dest_idx];
4760 skb_size = tp->rx_pkt_map_sz;
4763 case RXD_OPAQUE_RING_JUMBO:
4764 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4765 desc = &tpr->rx_jmb[dest_idx].std;
4766 map = &tpr->rx_jmb_buffers[dest_idx];
4767 skb_size = TG3_RX_JMB_MAP_SZ;
4774 /* Do not overwrite any of the map or rp information
4775 * until we are sure we can commit to a new buffer.
4777 * Callers depend upon this behavior and assume that
4778 * we leave everything unchanged if we fail.
4780 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4784 skb_reserve(skb, tp->rx_offset);
4786 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4787 PCI_DMA_FROMDEVICE);
4788 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4794 dma_unmap_addr_set(map, mapping, mapping);
4796 desc->addr_hi = ((u64)mapping >> 32);
4797 desc->addr_lo = ((u64)mapping & 0xffffffff);
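/* Editorial example, not from the original source: the two stores above
 * split the DMA address into 32-bit halves, so a mapping of
 * 0x0000000123456780 is written as addr_hi = 0x00000001 and
 * addr_lo = 0x23456780; on platforms with a 32-bit dma_addr_t the high
 * word is simply zero.
 */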
4802 /* We only need to move the address over because the other
4803 * members of the RX descriptor are invariant. See notes above
4804 * tg3_alloc_rx_skb for full details.
4806 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4807 struct tg3_rx_prodring_set *dpr,
4808 u32 opaque_key, int src_idx,
4809 u32 dest_idx_unmasked)
4811 struct tg3 *tp = tnapi->tp;
4812 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4813 struct ring_info *src_map, *dest_map;
4814 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4817 switch (opaque_key) {
4818 case RXD_OPAQUE_RING_STD:
4819 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4820 dest_desc = &dpr->rx_std[dest_idx];
4821 dest_map = &dpr->rx_std_buffers[dest_idx];
4822 src_desc = &spr->rx_std[src_idx];
4823 src_map = &spr->rx_std_buffers[src_idx];
4826 case RXD_OPAQUE_RING_JUMBO:
4827 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4828 dest_desc = &dpr->rx_jmb[dest_idx].std;
4829 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4830 src_desc = &spr->rx_jmb[src_idx].std;
4831 src_map = &spr->rx_jmb_buffers[src_idx];
4838 dest_map->skb = src_map->skb;
4839 dma_unmap_addr_set(dest_map, mapping,
4840 dma_unmap_addr(src_map, mapping));
4841 dest_desc->addr_hi = src_desc->addr_hi;
4842 dest_desc->addr_lo = src_desc->addr_lo;
4844 /* Ensure that the update to the skb happens after the physical
4845 * addresses have been transferred to the new BD location.
4847 smp_wmb();
4849 src_map->skb = NULL;
4852 /* The RX ring scheme is composed of multiple rings which post fresh
4853 * buffers to the chip, and one special ring the chip uses to report
4854 * status back to the host.
4856 * The special ring reports the status of received packets to the
4857 * host. The chip does not write into the original descriptor the
4858 * RX buffer was obtained from. The chip simply takes the original
4859 * descriptor as provided by the host, updates the status and length
4860 * field, then writes this into the next status ring entry.
4862 * Each ring the host uses to post buffers to the chip is described
4863 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4864 * it is first placed into the on-chip ram. When the packet's length
4865 * is known, it walks down the TG3_BDINFO entries to select the ring.
4866 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4867 * which is within the range of the new packet's length is chosen.
4869 * The "separate ring for rx status" scheme may sound queer, but it makes
4870 * sense from a cache coherency perspective. If only the host writes
4871 * to the buffer post rings, and only the chip writes to the rx status
4872 * rings, then cache lines never move beyond shared-modified state.
4873 * If both the host and chip were to write into the same ring, cache line
4874 * eviction could occur since both entities want it in an exclusive state.
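/* Editorial sketch with assumed values, mirroring the decode performed
 * in tg3_rx() below: each status ring entry carries an opaque cookie
 * naming the producer ring and index its buffer came from, e.g.
 *
 *	u32 opaque = RXD_OPAQUE_RING_STD | (42 << RXD_OPAQUE_INDEX_SHIFT);
 *	u32 idx  = opaque & RXD_OPAQUE_INDEX_MASK;	(yields 42)
 *	u32 ring = opaque & RXD_OPAQUE_RING_MASK;	(yields RING_STD)
 *
 * which is how a completion is matched back to its ring_info entry.
 */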
4876 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4878 struct tg3 *tp = tnapi->tp;
4879 u32 work_mask, rx_std_posted = 0;
4880 u32 std_prod_idx, jmb_prod_idx;
4881 u32 sw_idx = tnapi->rx_rcb_ptr;
4884 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4886 hw_idx = *(tnapi->rx_rcb_prod_idx);
4888 * We need to order the read of hw_idx and the read of
4889 * the opaque cookie.
4891 rmb();
4892 work_mask = 0;
4894 std_prod_idx = tpr->rx_std_prod_idx;
4895 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4896 while (sw_idx != hw_idx && budget > 0) {
4897 struct ring_info *ri;
4898 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4900 struct sk_buff *skb;
4901 dma_addr_t dma_addr;
4902 u32 opaque_key, desc_idx, *post_ptr;
4904 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4905 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4906 if (opaque_key == RXD_OPAQUE_RING_STD) {
4907 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4908 dma_addr = dma_unmap_addr(ri, mapping);
4910 post_ptr = &std_prod_idx;
4912 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4913 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4914 dma_addr = dma_unmap_addr(ri, mapping);
4916 post_ptr = &jmb_prod_idx;
4918 goto next_pkt_nopost;
4920 work_mask |= opaque_key;
4922 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4923 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4925 tg3_recycle_rx(tnapi, tpr, opaque_key,
4926 desc_idx, *post_ptr);
4928 /* Other statistics are kept track of by the card. */
4933 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4936 if (len > TG3_RX_COPY_THRESH(tp)) {
4939 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4944 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4945 PCI_DMA_FROMDEVICE);
4947 /* Ensure that the update to the skb happens
4948 * after the usage of the old DMA mapping.
4950 smp_wmb();
4952 ri->skb = NULL;
4956 struct sk_buff *copy_skb;
4958 tg3_recycle_rx(tnapi, tpr, opaque_key,
4959 desc_idx, *post_ptr);
4961 copy_skb = netdev_alloc_skb(tp->dev, len +
4963 if (copy_skb == NULL)
4964 goto drop_it_no_recycle;
4966 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4967 skb_put(copy_skb, len);
4968 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4969 skb_copy_from_linear_data(skb, copy_skb->data, len);
4970 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4972 /* We'll reuse the original ring buffer. */
4976 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4977 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4978 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4979 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4980 skb->ip_summed = CHECKSUM_UNNECESSARY;
4982 skb_checksum_none_assert(skb);
4984 skb->protocol = eth_type_trans(skb, tp->dev);
4986 if (len > (tp->dev->mtu + ETH_HLEN) &&
4987 skb->protocol != htons(ETH_P_8021Q)) {
4989 goto drop_it_no_recycle;
4992 if (desc->type_flags & RXD_FLAG_VLAN &&
4993 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4994 __vlan_hwaccel_put_tag(skb,
4995 desc->err_vlan & RXD_VLAN_MASK);
4997 napi_gro_receive(&tnapi->napi, skb);
5005 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5006 tpr->rx_std_prod_idx = std_prod_idx &
5007 tp->rx_std_ring_mask;
5008 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5009 tpr->rx_std_prod_idx);
5010 work_mask &= ~RXD_OPAQUE_RING_STD;
5015 sw_idx &= tp->rx_ret_ring_mask;
5017 /* Refresh hw_idx to see if there is new work */
5018 if (sw_idx == hw_idx) {
5019 hw_idx = *(tnapi->rx_rcb_prod_idx);
5024 /* ACK the status ring. */
5025 tnapi->rx_rcb_ptr = sw_idx;
5026 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5028 /* Refill RX ring(s). */
5029 if (!tg3_flag(tp, ENABLE_RSS)) {
5030 if (work_mask & RXD_OPAQUE_RING_STD) {
5031 tpr->rx_std_prod_idx = std_prod_idx &
5032 tp->rx_std_ring_mask;
5033 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5034 tpr->rx_std_prod_idx);
5036 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5037 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5038 tp->rx_jmb_ring_mask;
5039 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5040 tpr->rx_jmb_prod_idx);
5043 } else if (work_mask) {
5044 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5045 * updated before the producer indices can be updated.
5047 smp_wmb();
5049 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5050 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5052 if (tnapi != &tp->napi[1])
5053 napi_schedule(&tp->napi[1].napi);
5059 static void tg3_poll_link(struct tg3 *tp)
5061 /* handle link change and other phy events */
5062 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5063 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5065 if (sblk->status & SD_STATUS_LINK_CHG) {
5066 sblk->status = SD_STATUS_UPDATED |
5067 (sblk->status & ~SD_STATUS_LINK_CHG);
5068 spin_lock(&tp->lock);
5069 if (tg3_flag(tp, USE_PHYLIB)) {
5071 (MAC_STATUS_SYNC_CHANGED |
5072 MAC_STATUS_CFG_CHANGED |
5073 MAC_STATUS_MI_COMPLETION |
5074 MAC_STATUS_LNKSTATE_CHANGED));
5077 tg3_setup_phy(tp, 0);
5078 spin_unlock(&tp->lock);
5083 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5084 struct tg3_rx_prodring_set *dpr,
5085 struct tg3_rx_prodring_set *spr)
5087 u32 si, di, cpycnt, src_prod_idx;
5091 src_prod_idx = spr->rx_std_prod_idx;
5093 /* Make sure updates to the rx_std_buffers[] entries and the
5094 * standard producer index are seen in the correct order.
5096 smp_rmb();
5098 if (spr->rx_std_cons_idx == src_prod_idx)
5101 if (spr->rx_std_cons_idx < src_prod_idx)
5102 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5104 cpycnt = tp->rx_std_ring_mask + 1 -
5105 spr->rx_std_cons_idx;
5107 cpycnt = min(cpycnt,
5108 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5110 si = spr->rx_std_cons_idx;
5111 di = dpr->rx_std_prod_idx;
5113 for (i = di; i < di + cpycnt; i++) {
5114 if (dpr->rx_std_buffers[i].skb) {
5124 /* Ensure that updates to the rx_std_buffers ring and the
5125 * shadowed hardware producer ring from tg3_recycle_skb() are
5126 * ordered correctly WRT the skb check above.
5128 smp_rmb();
5130 memcpy(&dpr->rx_std_buffers[di],
5131 &spr->rx_std_buffers[si],
5132 cpycnt * sizeof(struct ring_info));
5134 for (i = 0; i < cpycnt; i++, di++, si++) {
5135 struct tg3_rx_buffer_desc *sbd, *dbd;
5136 sbd = &spr->rx_std[si];
5137 dbd = &dpr->rx_std[di];
5138 dbd->addr_hi = sbd->addr_hi;
5139 dbd->addr_lo = sbd->addr_lo;
5142 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5143 tp->rx_std_ring_mask;
5144 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5145 tp->rx_std_ring_mask;
5149 src_prod_idx = spr->rx_jmb_prod_idx;
5151 /* Make sure updates to the rx_jmb_buffers[] entries and
5152 * the jumbo producer index are seen in the correct order.
5154 smp_rmb();
5156 if (spr->rx_jmb_cons_idx == src_prod_idx)
5159 if (spr->rx_jmb_cons_idx < src_prod_idx)
5160 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5162 cpycnt = tp->rx_jmb_ring_mask + 1 -
5163 spr->rx_jmb_cons_idx;
5165 cpycnt = min(cpycnt,
5166 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5168 si = spr->rx_jmb_cons_idx;
5169 di = dpr->rx_jmb_prod_idx;
5171 for (i = di; i < di + cpycnt; i++) {
5172 if (dpr->rx_jmb_buffers[i].skb) {
5182 /* Ensure that updates to the rx_jmb_buffers ring and the
5183 * shadowed hardware producer ring from tg3_recycle_skb() are
5184 * ordered correctly WRT the skb check above.
5186 smp_rmb();
5188 memcpy(&dpr->rx_jmb_buffers[di],
5189 &spr->rx_jmb_buffers[si],
5190 cpycnt * sizeof(struct ring_info));
5192 for (i = 0; i < cpycnt; i++, di++, si++) {
5193 struct tg3_rx_buffer_desc *sbd, *dbd;
5194 sbd = &spr->rx_jmb[si].std;
5195 dbd = &dpr->rx_jmb[di].std;
5196 dbd->addr_hi = sbd->addr_hi;
5197 dbd->addr_lo = sbd->addr_lo;
5200 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5201 tp->rx_jmb_ring_mask;
5202 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5203 tp->rx_jmb_ring_mask;
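/* Editorial worked example with hypothetical indices: with
 * rx_std_ring_mask = 511, rx_std_cons_idx = 500 and rx_std_prod_idx =
 * 10 the producer has wrapped, so cpycnt is first clamped to the 12
 * entries up to the end of the source ring (512 - 500) and the outer
 * loop comes around again for the remaining 10.  The min() against the
 * destination ring guards the mirror-image wrap on the other side.
 */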
5209 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5211 struct tg3 *tp = tnapi->tp;
5213 /* run TX completion thread */
5214 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5216 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5220 /* run RX thread, within the bounds set by NAPI.
5221 * All RX "locking" is done by ensuring outside
5222 * code synchronizes with tg3->napi.poll()
5224 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5225 work_done += tg3_rx(tnapi, budget - work_done);
5227 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5228 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5230 u32 std_prod_idx = dpr->rx_std_prod_idx;
5231 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5233 for (i = 1; i < tp->irq_cnt; i++)
5234 err |= tg3_rx_prodring_xfer(tp, dpr,
5235 &tp->napi[i].prodring);
5239 if (std_prod_idx != dpr->rx_std_prod_idx)
5240 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5241 dpr->rx_std_prod_idx);
5243 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5244 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5245 dpr->rx_jmb_prod_idx);
5250 tw32_f(HOSTCC_MODE, tp->coal_now);
5256 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5258 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5259 struct tg3 *tp = tnapi->tp;
5261 struct tg3_hw_status *sblk = tnapi->hw_status;
5264 work_done = tg3_poll_work(tnapi, work_done, budget);
5266 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5269 if (unlikely(work_done >= budget))
5272 /* tp->last_tag is used in tg3_int_reenable() below
5273 * to tell the hw how much work has been processed,
5274 * so we must read it before checking for more work.
5276 tnapi->last_tag = sblk->status_tag;
5277 tnapi->last_irq_tag = tnapi->last_tag;
5280 /* check for RX/TX work to do */
5281 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5282 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5283 napi_complete(napi);
5284 /* Reenable interrupts. */
5285 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5294 /* work_done is guaranteed to be less than budget. */
5295 napi_complete(napi);
5296 schedule_work(&tp->reset_task);
5300 static void tg3_process_error(struct tg3 *tp)
5303 bool real_error = false;
5305 if (tg3_flag(tp, ERROR_PROCESSED))
5308 /* Check Flow Attention register */
5309 val = tr32(HOSTCC_FLOW_ATTN);
5310 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5311 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5315 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5316 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5320 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5321 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5330 tg3_flag_set(tp, ERROR_PROCESSED);
5331 schedule_work(&tp->reset_task);
5334 static int tg3_poll(struct napi_struct *napi, int budget)
5336 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5337 struct tg3 *tp = tnapi->tp;
5339 struct tg3_hw_status *sblk = tnapi->hw_status;
5342 if (sblk->status & SD_STATUS_ERROR)
5343 tg3_process_error(tp);
5347 work_done = tg3_poll_work(tnapi, work_done, budget);
5349 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5352 if (unlikely(work_done >= budget))
5355 if (tg3_flag(tp, TAGGED_STATUS)) {
5356 /* tp->last_tag is used in tg3_int_reenable() below
5357 * to tell the hw how much work has been processed,
5358 * so we must read it before checking for more work.
5360 tnapi->last_tag = sblk->status_tag;
5361 tnapi->last_irq_tag = tnapi->last_tag;
5364 sblk->status &= ~SD_STATUS_UPDATED;
5366 if (likely(!tg3_has_work(tnapi))) {
5367 napi_complete(napi);
5368 tg3_int_reenable(tnapi);
5376 /* work_done is guaranteed to be less than budget. */
5377 napi_complete(napi);
5378 schedule_work(&tp->reset_task);
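/* Editorial note: in tagged-status mode the mailbox write done by
 * tg3_int_reenable() (last_tag << 24, as seen in tg3_poll_msix()
 * above) reports how far the driver has processed; the hardware only
 * raises a new interrupt once the status tag advances past that point,
 * which is why last_tag must be latched before re-checking for work.
 */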
5382 static void tg3_napi_disable(struct tg3 *tp)
5386 for (i = tp->irq_cnt - 1; i >= 0; i--)
5387 napi_disable(&tp->napi[i].napi);
5390 static void tg3_napi_enable(struct tg3 *tp)
5394 for (i = 0; i < tp->irq_cnt; i++)
5395 napi_enable(&tp->napi[i].napi);
5398 static void tg3_napi_init(struct tg3 *tp)
5402 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5403 for (i = 1; i < tp->irq_cnt; i++)
5404 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5407 static void tg3_napi_fini(struct tg3 *tp)
5411 for (i = 0; i < tp->irq_cnt; i++)
5412 netif_napi_del(&tp->napi[i].napi);
5415 static inline void tg3_netif_stop(struct tg3 *tp)
5417 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5418 tg3_napi_disable(tp);
5419 netif_tx_disable(tp->dev);
5422 static inline void tg3_netif_start(struct tg3 *tp)
5424 /* NOTE: unconditional netif_tx_wake_all_queues is only
5425 * appropriate so long as all callers are assured to
5426 * have free tx slots (such as after tg3_init_hw)
5428 netif_tx_wake_all_queues(tp->dev);
5430 tg3_napi_enable(tp);
5431 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5432 tg3_enable_ints(tp);
5435 static void tg3_irq_quiesce(struct tg3 *tp)
5439 BUG_ON(tp->irq_sync);
5444 for (i = 0; i < tp->irq_cnt; i++)
5445 synchronize_irq(tp->napi[i].irq_vec);
5448 /* Fully shut down all tg3 driver activity elsewhere in the system.
5449 * If irq_sync is non-zero, the IRQ handlers must be synchronized as
5450 * well. Most of the time this is not necessary, except when
5451 * shutting down the device.
5453 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5455 spin_lock_bh(&tp->lock);
5457 tg3_irq_quiesce(tp);
5460 static inline void tg3_full_unlock(struct tg3 *tp)
5462 spin_unlock_bh(&tp->lock);
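/* Editorial usage sketch of the locking idiom, mirroring callers such
 * as tg3_change_mtu() below:
 *
 *	tg3_full_lock(tp, 1);	(non-zero irq_sync also quiesces IRQs)
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 *
 * Callers that only need the spinlock pass irq_sync = 0 and skip the
 * tg3_irq_quiesce() step.
 */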
5465 /* One-shot MSI handler - Chip automatically disables interrupt
5466 * after sending MSI so driver doesn't have to do it.
5468 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5470 struct tg3_napi *tnapi = dev_id;
5471 struct tg3 *tp = tnapi->tp;
5473 prefetch(tnapi->hw_status);
5475 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5477 if (likely(!tg3_irq_sync(tp)))
5478 napi_schedule(&tnapi->napi);
5483 /* MSI ISR - No need to check for interrupt sharing and no need to
5484 * flush status block and interrupt mailbox. PCI ordering rules
5485 * guarantee that MSI will arrive after the status block.
5487 static irqreturn_t tg3_msi(int irq, void *dev_id)
5489 struct tg3_napi *tnapi = dev_id;
5490 struct tg3 *tp = tnapi->tp;
5492 prefetch(tnapi->hw_status);
5494 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5496 * Writing any value to intr-mbox-0 clears PCI INTA# and
5497 * chip-internal interrupt pending events.
5498 * Writing non-zero to intr-mbox-0 additionally tells the
5499 * NIC to stop sending us irqs, engaging "in-intr-handler"
5502 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5503 if (likely(!tg3_irq_sync(tp)))
5504 napi_schedule(&tnapi->napi);
5506 return IRQ_RETVAL(1);
5509 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5511 struct tg3_napi *tnapi = dev_id;
5512 struct tg3 *tp = tnapi->tp;
5513 struct tg3_hw_status *sblk = tnapi->hw_status;
5514 unsigned int handled = 1;
5516 /* In INTx mode, it is possible for the interrupt to arrive at
5517 * the CPU before the status block posted prior to the interrupt.
5518 * Reading the PCI State register will confirm whether the
5519 * interrupt is ours and will flush the status block.
5521 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5522 if (tg3_flag(tp, CHIP_RESETTING) ||
5523 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5530 * Writing any value to intr-mbox-0 clears PCI INTA# and
5531 * chip-internal interrupt pending events.
5532 * Writing non-zero to intr-mbox-0 additionally tells the
5533 * NIC to stop sending us irqs, engaging "in-intr-handler"
5536 * Flush the mailbox to de-assert the IRQ immediately to prevent
5537 * spurious interrupts. The flush impacts performance but
5538 * excessive spurious interrupts can be worse in some cases.
5540 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5541 if (tg3_irq_sync(tp))
5543 sblk->status &= ~SD_STATUS_UPDATED;
5544 if (likely(tg3_has_work(tnapi))) {
5545 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5546 napi_schedule(&tnapi->napi);
5548 /* No work, shared interrupt perhaps? re-enable
5549 * interrupts, and flush that PCI write
5551 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5555 return IRQ_RETVAL(handled);
5558 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5560 struct tg3_napi *tnapi = dev_id;
5561 struct tg3 *tp = tnapi->tp;
5562 struct tg3_hw_status *sblk = tnapi->hw_status;
5563 unsigned int handled = 1;
5565 /* In INTx mode, it is possible for the interrupt to arrive at
5566 * the CPU before the status block posted prior to the interrupt.
5567 * Reading the PCI State register will confirm whether the
5568 * interrupt is ours and will flush the status block.
5570 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5571 if (tg3_flag(tp, CHIP_RESETTING) ||
5572 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5579 * Writing any value to intr-mbox-0 clears PCI INTA# and
5580 * chip-internal interrupt pending events.
5581 * Writing non-zero to intr-mbox-0 additionally tells the
5582 * NIC to stop sending us irqs, engaging "in-intr-handler"
5585 * Flush the mailbox to de-assert the IRQ immediately to prevent
5586 * spurious interrupts. The flush impacts performance but
5587 * excessive spurious interrupts can be worse in some cases.
5589 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5592 * In a shared interrupt configuration, sometimes other devices'
5593 * interrupts will scream. We record the current status tag here
5594 * so that the above check can report that the screaming interrupts
5595 * are unhandled. Eventually they will be silenced.
5597 tnapi->last_irq_tag = sblk->status_tag;
5599 if (tg3_irq_sync(tp))
5602 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5604 napi_schedule(&tnapi->napi);
5607 return IRQ_RETVAL(handled);
5610 /* ISR for interrupt test */
5611 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5613 struct tg3_napi *tnapi = dev_id;
5614 struct tg3 *tp = tnapi->tp;
5615 struct tg3_hw_status *sblk = tnapi->hw_status;
5617 if ((sblk->status & SD_STATUS_UPDATED) ||
5618 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5619 tg3_disable_ints(tp);
5620 return IRQ_RETVAL(1);
5622 return IRQ_RETVAL(0);
5625 static int tg3_init_hw(struct tg3 *, int);
5626 static int tg3_halt(struct tg3 *, int, int);
5628 /* Restart hardware after configuration changes, self-test, etc.
5629 * Invoked with tp->lock held.
5631 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5632 __releases(tp->lock)
5633 __acquires(tp->lock)
5637 err = tg3_init_hw(tp, reset_phy);
5640 "Failed to re-initialize device, aborting\n");
5641 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5642 tg3_full_unlock(tp);
5643 del_timer_sync(&tp->timer);
5645 tg3_napi_enable(tp);
5647 tg3_full_lock(tp, 0);
5652 #ifdef CONFIG_NET_POLL_CONTROLLER
5653 static void tg3_poll_controller(struct net_device *dev)
5656 struct tg3 *tp = netdev_priv(dev);
5658 for (i = 0; i < tp->irq_cnt; i++)
5659 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5663 static void tg3_reset_task(struct work_struct *work)
5665 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5667 unsigned int restart_timer;
5669 tg3_full_lock(tp, 0);
5671 if (!netif_running(tp->dev)) {
5672 tg3_full_unlock(tp);
5676 tg3_full_unlock(tp);
5682 tg3_full_lock(tp, 1);
5684 restart_timer = tg3_flag(tp, RESTART_TIMER);
5685 tg3_flag_clear(tp, RESTART_TIMER);
5687 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5688 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5689 tp->write32_rx_mbox = tg3_write_flush_reg32;
5690 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5691 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5694 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5695 err = tg3_init_hw(tp, 1);
5699 tg3_netif_start(tp);
5702 mod_timer(&tp->timer, jiffies + 1);
5705 tg3_full_unlock(tp);
5711 static void tg3_tx_timeout(struct net_device *dev)
5713 struct tg3 *tp = netdev_priv(dev);
5715 if (netif_msg_tx_err(tp)) {
5716 netdev_err(dev, "transmit timed out, resetting\n");
5720 schedule_work(&tp->reset_task);
5723 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5724 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5726 u32 base = (u32) mapping & 0xffffffff;
5728 return (base > 0xffffdcc0) && (base + len + 8 < base);
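/* Editorial worked example: base = 0xfffffe00 with len = 0x400 gives
 * base + len + 8 = 0x00000208 after 32-bit truncation, which is less
 * than base, so the buffer straddles a 4GB boundary.  The pre-check
 * against 0xffffdcc0 cheaply skips buffers that sit too far below the
 * boundary to reach it (the constant presumably reflects the largest
 * transfer the hardware will issue).
 */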
5731 /* Test for DMA addresses > 40-bit */
5732 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5735 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5736 if (tg3_flag(tp, 40BIT_DMA_BUG))
5737 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5744 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5745 dma_addr_t mapping, int len, u32 flags,
5748 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5749 int is_end = (mss_and_is_end & 0x1);
5750 u32 mss = (mss_and_is_end >> 1);
5754 flags |= TXD_FLAG_END;
5755 if (flags & TXD_FLAG_VLAN) {
5756 vlan_tag = flags >> 16;
5759 vlan_tag |= (mss << TXD_MSS_SHIFT);
5761 txd->addr_hi = ((u64) mapping >> 32);
5762 txd->addr_lo = ((u64) mapping & 0xffffffff);
5763 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5764 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
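/* Editorial example with hypothetical values: the final 1514-byte frame
 * of a TSO burst with mss = 1460 would be passed
 * mss_and_is_end = (1460 << 1) | 1, which the code above unpacks and
 * encodes as len_flags = (1514 << TXD_LEN_SHIFT) | flags | TXD_FLAG_END,
 * with the MSS folded into the vlan_tag word via TXD_MSS_SHIFT.
 */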
5767 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5768 struct sk_buff *skb, int last)
5771 u32 entry = tnapi->tx_prod;
5772 struct ring_info *txb = &tnapi->tx_buffers[entry];
5774 pci_unmap_single(tnapi->tp->pdev,
5775 dma_unmap_addr(txb, mapping),
5778 for (i = 0; i < last; i++) {
5779 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5781 entry = NEXT_TX(entry);
5782 txb = &tnapi->tx_buffers[entry];
5784 pci_unmap_page(tnapi->tp->pdev,
5785 dma_unmap_addr(txb, mapping),
5786 frag->size, PCI_DMA_TODEVICE);
5790 /* Work around 4GB and 40-bit hardware DMA bugs. */
5791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5792 struct sk_buff *skb,
5793 u32 base_flags, u32 mss)
5795 struct tg3 *tp = tnapi->tp;
5796 struct sk_buff *new_skb;
5797 dma_addr_t new_addr = 0;
5798 u32 entry = tnapi->tx_prod;
5801 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5802 new_skb = skb_copy(skb, GFP_ATOMIC);
5804 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5806 new_skb = skb_copy_expand(skb,
5807 skb_headroom(skb) + more_headroom,
5808 skb_tailroom(skb), GFP_ATOMIC);
5814 /* New SKB is guaranteed to be linear. */
5815 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5817 /* Make sure the mapping succeeded */
5818 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5820 dev_kfree_skb(new_skb);
5822 /* Make sure new skb does not cross any 4G boundaries.
5823 * Drop the packet if it does.
5825 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5826 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5827 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5830 dev_kfree_skb(new_skb);
5832 tnapi->tx_buffers[entry].skb = new_skb;
5833 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5836 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5837 base_flags, 1 | (mss << 1));
5846 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5848 /* Use GSO to work around a rare TSO bug that may be triggered when the
5849 * TSO header is greater than 80 bytes.
5851 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5853 struct sk_buff *segs, *nskb;
5854 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5856 /* Estimate the number of fragments in the worst case */
5857 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5858 netif_stop_queue(tp->dev);
5860 /* netif_tx_stop_queue() must be done before
5861 * checking tx index in tg3_tx_avail() below, because in
5862 * tg3_tx(), we update tx index before checking for
5863 * netif_tx_queue_stopped().
5865 smp_mb();
5866 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5867 return NETDEV_TX_BUSY;
5869 netif_wake_queue(tp->dev);
5872 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5874 goto tg3_tso_bug_end;
5880 tg3_start_xmit(nskb, tp->dev);
5886 return NETDEV_TX_OK;
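/* Editorial note: skb_gso_segment() above hands back a list of sub-mss
 * skbs, each re-queued through tg3_start_xmit() with TSO masked out of
 * the feature set; the gso_segs * 3 estimate assumes a worst case of
 * roughly three tx descriptors per resulting segment.
 */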
5889 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5890 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5892 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5894 struct tg3 *tp = netdev_priv(dev);
5895 u32 len, entry, base_flags, mss;
5896 int i = -1, would_hit_hwbug;
5898 struct tg3_napi *tnapi;
5899 struct netdev_queue *txq;
5902 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5903 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5904 if (tg3_flag(tp, ENABLE_TSS))
5907 /* We are running in BH disabled context with netif_tx_lock
5908 * and TX reclaim runs via tp->napi.poll inside of a software
5909 * interrupt. Furthermore, IRQ processing runs lockless so we have
5910 * no IRQ context deadlocks to worry about either. Rejoice!
5912 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5913 if (!netif_tx_queue_stopped(txq)) {
5914 netif_tx_stop_queue(txq);
5916 /* This is a hard error, log it. */
5918 "BUG! Tx Ring full when queue awake!\n");
5920 return NETDEV_TX_BUSY;
5923 entry = tnapi->tx_prod;
5925 if (skb->ip_summed == CHECKSUM_PARTIAL)
5926 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5928 mss = skb_shinfo(skb)->gso_size;
5931 u32 tcp_opt_len, hdr_len;
5933 if (skb_header_cloned(skb) &&
5934 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5940 tcp_opt_len = tcp_optlen(skb);
5942 if (skb_is_gso_v6(skb)) {
5943 hdr_len = skb_headlen(skb) - ETH_HLEN;
5947 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5948 hdr_len = ip_tcp_len + tcp_opt_len;
5951 iph->tot_len = htons(mss + hdr_len);
5954 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5955 tg3_flag(tp, TSO_BUG))
5956 return tg3_tso_bug(tp, skb);
5958 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5959 TXD_FLAG_CPU_POST_DMA);
5961 if (tg3_flag(tp, HW_TSO_1) ||
5962 tg3_flag(tp, HW_TSO_2) ||
5963 tg3_flag(tp, HW_TSO_3)) {
5964 tcp_hdr(skb)->check = 0;
5965 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5967 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5972 if (tg3_flag(tp, HW_TSO_3)) {
5973 mss |= (hdr_len & 0xc) << 12;
5975 base_flags |= 0x00000010;
5976 base_flags |= (hdr_len & 0x3e0) << 5;
5977 } else if (tg3_flag(tp, HW_TSO_2))
5978 mss |= hdr_len << 9;
5979 else if (tg3_flag(tp, HW_TSO_1) ||
5980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5981 if (tcp_opt_len || iph->ihl > 5) {
5984 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5985 mss |= (tsflags << 11);
5988 if (tcp_opt_len || iph->ihl > 5) {
5991 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5992 base_flags |= tsflags << 12;
5997 if (vlan_tx_tag_present(skb))
5998 base_flags |= (TXD_FLAG_VLAN |
5999 (vlan_tx_tag_get(skb) << 16));
6001 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6002 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6003 base_flags |= TXD_FLAG_JMB_PKT;
6005 len = skb_headlen(skb);
6007 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6008 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6013 tnapi->tx_buffers[entry].skb = skb;
6014 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6016 would_hit_hwbug = 0;
6018 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6019 would_hit_hwbug = 1;
6021 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6022 tg3_4g_overflow_test(mapping, len))
6023 would_hit_hwbug = 1;
6025 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6026 tg3_40bit_overflow_test(tp, mapping, len))
6027 would_hit_hwbug = 1;
6029 if (tg3_flag(tp, 5701_DMA_BUG))
6030 would_hit_hwbug = 1;
6032 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6033 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6035 entry = NEXT_TX(entry);
6037 /* Now loop through additional data fragments, and queue them. */
6038 if (skb_shinfo(skb)->nr_frags > 0) {
6039 last = skb_shinfo(skb)->nr_frags - 1;
6040 for (i = 0; i <= last; i++) {
6041 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6044 mapping = pci_map_page(tp->pdev,
6047 len, PCI_DMA_TODEVICE);
6049 tnapi->tx_buffers[entry].skb = NULL;
6050 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6052 if (pci_dma_mapping_error(tp->pdev, mapping))
6055 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6057 would_hit_hwbug = 1;
6059 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6060 tg3_4g_overflow_test(mapping, len))
6061 would_hit_hwbug = 1;
6063 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6064 tg3_40bit_overflow_test(tp, mapping, len))
6065 would_hit_hwbug = 1;
6067 if (tg3_flag(tp, HW_TSO_1) ||
6068 tg3_flag(tp, HW_TSO_2) ||
6069 tg3_flag(tp, HW_TSO_3))
6070 tg3_set_txd(tnapi, entry, mapping, len,
6071 base_flags, (i == last)|(mss << 1));
6073 tg3_set_txd(tnapi, entry, mapping, len,
6074 base_flags, (i == last));
6076 entry = NEXT_TX(entry);
6080 if (would_hit_hwbug) {
6081 tg3_skb_error_unmap(tnapi, skb, i);
6083 /* If the workaround fails due to memory/mapping
6084 * failure, silently drop this packet.
6086 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6089 entry = NEXT_TX(tnapi->tx_prod);
6092 /* Packets are ready, update Tx producer idx local and on card. */
6093 tw32_tx_mbox(tnapi->prodmbox, entry);
6095 tnapi->tx_prod = entry;
6096 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6097 netif_tx_stop_queue(txq);
6099 /* netif_tx_stop_queue() must be done before
6100 * checking tx index in tg3_tx_avail() below, because in
6101 * tg3_tx(), we update tx index before checking for
6102 * netif_tx_queue_stopped().
6104 smp_mb();
6105 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6106 netif_tx_wake_queue(txq);
6112 return NETDEV_TX_OK;
6115 tg3_skb_error_unmap(tnapi, skb, i);
6117 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6118 return NETDEV_TX_OK;
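/* Editorial note: the dma_error unwind above still returns NETDEV_TX_OK
 * rather than NETDEV_TX_BUSY: once the skb has been unmapped and
 * dropped there is nothing for the networking core to retry, so the
 * packet is reported as consumed.
 */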
6121 static void tg3_set_loopback(struct net_device *dev, u32 features)
6123 struct tg3 *tp = netdev_priv(dev);
6125 if (features & NETIF_F_LOOPBACK) {
6126 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6130 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6131 * loopback mode if Half-Duplex mode was negotiated earlier.
6133 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6135 /* Enable internal MAC loopback mode */
6136 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6137 spin_lock_bh(&tp->lock);
6138 tw32(MAC_MODE, tp->mac_mode);
6139 netif_carrier_on(tp->dev);
6140 spin_unlock_bh(&tp->lock);
6141 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6143 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6146 /* Disable internal MAC loopback mode */
6147 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6148 spin_lock_bh(&tp->lock);
6149 tw32(MAC_MODE, tp->mac_mode);
6150 /* Force link status check */
6151 tg3_setup_phy(tp, 1);
6152 spin_unlock_bh(&tp->lock);
6153 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6157 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6159 struct tg3 *tp = netdev_priv(dev);
6161 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6162 features &= ~NETIF_F_ALL_TSO;
6167 static int tg3_set_features(struct net_device *dev, u32 features)
6169 u32 changed = dev->features ^ features;
6171 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6172 tg3_set_loopback(dev, features);
6177 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6182 if (new_mtu > ETH_DATA_LEN) {
6183 if (tg3_flag(tp, 5780_CLASS)) {
6184 netdev_update_features(dev);
6185 tg3_flag_clear(tp, TSO_CAPABLE);
6187 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6190 if (tg3_flag(tp, 5780_CLASS)) {
6191 tg3_flag_set(tp, TSO_CAPABLE);
6192 netdev_update_features(dev);
6194 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6198 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6200 struct tg3 *tp = netdev_priv(dev);
6203 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6206 if (!netif_running(dev)) {
6207 /* We'll just catch it later when the
6210 tg3_set_mtu(dev, tp, new_mtu);
6218 tg3_full_lock(tp, 1);
6220 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6222 tg3_set_mtu(dev, tp, new_mtu);
6224 err = tg3_restart_hw(tp, 0);
6227 tg3_netif_start(tp);
6229 tg3_full_unlock(tp);
6237 static void tg3_rx_prodring_free(struct tg3 *tp,
6238 struct tg3_rx_prodring_set *tpr)
6242 if (tpr != &tp->napi[0].prodring) {
6243 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6244 i = (i + 1) & tp->rx_std_ring_mask)
6245 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6248 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6249 for (i = tpr->rx_jmb_cons_idx;
6250 i != tpr->rx_jmb_prod_idx;
6251 i = (i + 1) & tp->rx_jmb_ring_mask) {
6252 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6260 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6261 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6264 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6265 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6266 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6271 /* Initialize rx rings for packet processing.
6273 * The chip has been shut down and the driver detached from
6274 * the networking, so no interrupts or new tx packets will
6275 * end up in the driver. tp->{tx,}lock are held and thus
6276 * we may not sleep.
6278 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6279 struct tg3_rx_prodring_set *tpr)
6281 u32 i, rx_pkt_dma_sz;
6283 tpr->rx_std_cons_idx = 0;
6284 tpr->rx_std_prod_idx = 0;
6285 tpr->rx_jmb_cons_idx = 0;
6286 tpr->rx_jmb_prod_idx = 0;
6288 if (tpr != &tp->napi[0].prodring) {
6289 memset(&tpr->rx_std_buffers[0], 0,
6290 TG3_RX_STD_BUFF_RING_SIZE(tp));
6291 if (tpr->rx_jmb_buffers)
6292 memset(&tpr->rx_jmb_buffers[0], 0,
6293 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6297 /* Zero out all descriptors. */
6298 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6300 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6301 if (tg3_flag(tp, 5780_CLASS) &&
6302 tp->dev->mtu > ETH_DATA_LEN)
6303 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6304 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6306 /* Initialize invariants of the rings; we only set this
6307 * stuff once. This works because the card does not
6308 * write into the rx buffer posting rings.
6310 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6311 struct tg3_rx_buffer_desc *rxd;
6313 rxd = &tpr->rx_std[i];
6314 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6315 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6316 rxd->opaque = (RXD_OPAQUE_RING_STD |
6317 (i << RXD_OPAQUE_INDEX_SHIFT));
6320 /* Now allocate fresh SKBs for each rx ring. */
6321 for (i = 0; i < tp->rx_pending; i++) {
6322 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6323 netdev_warn(tp->dev,
6324 "Using a smaller RX standard ring. Only "
6325 "%d out of %d buffers were allocated "
6326 "successfully\n", i, tp->rx_pending);
6334 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6337 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6339 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6342 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6343 struct tg3_rx_buffer_desc *rxd;
6345 rxd = &tpr->rx_jmb[i].std;
6346 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6347 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6349 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6350 (i << RXD_OPAQUE_INDEX_SHIFT));
6353 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6354 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6355 netdev_warn(tp->dev,
6356 "Using a smaller RX jumbo ring. Only %d "
6357 "out of %d buffers were allocated "
6358 "successfully\n", i, tp->rx_jumbo_pending);
6361 tp->rx_jumbo_pending = i;
6370 tg3_rx_prodring_free(tp, tpr);
6374 static void tg3_rx_prodring_fini(struct tg3 *tp,
6375 struct tg3_rx_prodring_set *tpr)
6377 kfree(tpr->rx_std_buffers);
6378 tpr->rx_std_buffers = NULL;
6379 kfree(tpr->rx_jmb_buffers);
6380 tpr->rx_jmb_buffers = NULL;
6382 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6383 tpr->rx_std, tpr->rx_std_mapping);
6387 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6388 tpr->rx_jmb, tpr->rx_jmb_mapping);
6393 static int tg3_rx_prodring_init(struct tg3 *tp,
6394 struct tg3_rx_prodring_set *tpr)
6396 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6398 if (!tpr->rx_std_buffers)
6401 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6402 TG3_RX_STD_RING_BYTES(tp),
6403 &tpr->rx_std_mapping,
6408 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6409 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6411 if (!tpr->rx_jmb_buffers)
6414 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6415 TG3_RX_JMB_RING_BYTES(tp),
6416 &tpr->rx_jmb_mapping,
6425 tg3_rx_prodring_fini(tp, tpr);
6429 /* Free up pending packets in all rx/tx rings.
6431 * The chip has been shut down and the driver detached from
6432 * the networking, so no interrupts or new tx packets will
6433 * end up in the driver. tp->{tx,}lock is not held and we are not
6434 * in an interrupt context and thus may sleep.
6436 static void tg3_free_rings(struct tg3 *tp)
6440 for (j = 0; j < tp->irq_cnt; j++) {
6441 struct tg3_napi *tnapi = &tp->napi[j];
6443 tg3_rx_prodring_free(tp, &tnapi->prodring);
6445 if (!tnapi->tx_buffers)
6448 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6449 struct ring_info *txp;
6450 struct sk_buff *skb;
6453 txp = &tnapi->tx_buffers[i];
6461 pci_unmap_single(tp->pdev,
6462 dma_unmap_addr(txp, mapping),
6469 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6470 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6471 pci_unmap_page(tp->pdev,
6472 dma_unmap_addr(txp, mapping),
6473 skb_shinfo(skb)->frags[k].size,
6478 dev_kfree_skb_any(skb);
6483 /* Initialize tx/rx rings for packet processing.
6485 * The chip has been shut down and the driver detached from
6486 * the networking, so no interrupts or new tx packets will
6487 * end up in the driver. tp->{tx,}lock are held and thus
6488 * we may not sleep.
6490 static int tg3_init_rings(struct tg3 *tp)
6494 /* Free up all the SKBs. */
6497 for (i = 0; i < tp->irq_cnt; i++) {
6498 struct tg3_napi *tnapi = &tp->napi[i];
6500 tnapi->last_tag = 0;
6501 tnapi->last_irq_tag = 0;
6502 tnapi->hw_status->status = 0;
6503 tnapi->hw_status->status_tag = 0;
6504 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6509 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6511 tnapi->rx_rcb_ptr = 0;
6513 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6515 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6525 * Must not be invoked with interrupt sources disabled and
6526 * the hardware shut down.
6528 static void tg3_free_consistent(struct tg3 *tp)
6532 for (i = 0; i < tp->irq_cnt; i++) {
6533 struct tg3_napi *tnapi = &tp->napi[i];
6535 if (tnapi->tx_ring) {
6536 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6537 tnapi->tx_ring, tnapi->tx_desc_mapping);
6538 tnapi->tx_ring = NULL;
6541 kfree(tnapi->tx_buffers);
6542 tnapi->tx_buffers = NULL;
6544 if (tnapi->rx_rcb) {
6545 dma_free_coherent(&tp->pdev->dev,
6546 TG3_RX_RCB_RING_BYTES(tp),
6548 tnapi->rx_rcb_mapping);
6549 tnapi->rx_rcb = NULL;
6552 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6554 if (tnapi->hw_status) {
6555 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6557 tnapi->status_mapping);
6558 tnapi->hw_status = NULL;
6563 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6564 tp->hw_stats, tp->stats_mapping);
6565 tp->hw_stats = NULL;
6570 * Must not be invoked with interrupt sources disabled and
6571 * the hardware shut down. Can sleep.
6573 static int tg3_alloc_consistent(struct tg3 *tp)
6577 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6578 sizeof(struct tg3_hw_stats),
6584 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6586 for (i = 0; i < tp->irq_cnt; i++) {
6587 struct tg3_napi *tnapi = &tp->napi[i];
6588 struct tg3_hw_status *sblk;
6590 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6592 &tnapi->status_mapping,
6594 if (!tnapi->hw_status)
6597 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6598 sblk = tnapi->hw_status;
6600 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6603 /* If multivector TSS is enabled, vector 0 does not handle
6604 * tx interrupts. Don't allocate any resources for it.
6606 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6607 (i && tg3_flag(tp, ENABLE_TSS))) {
6608 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6611 if (!tnapi->tx_buffers)
6614 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6616 &tnapi->tx_desc_mapping,
6618 if (!tnapi->tx_ring)
6623 * When RSS is enabled, the status block format changes
6624 * slightly. The "rx_jumbo_consumer", "reserved",
6625 * and "rx_mini_consumer" members get mapped to the
6626 * other three rx return ring producer indexes.
6630 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6633 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6636 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6639 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
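/* Editorial note: this switch is the RSS remapping described above:
 * the default case uses idx[0].rx_producer, while vectors 2, 3 and 4
 * borrow the rx_jumbo_consumer, reserved and rx_mini_consumer
 * status-block words as their rx return ring producer indices.
 */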
6644 * If multivector RSS is enabled, vector 0 does not handle
6645 * rx or tx interrupts. Don't allocate any resources for it.
6647 if (!i && tg3_flag(tp, ENABLE_RSS))
6650 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6651 TG3_RX_RCB_RING_BYTES(tp),
6652 &tnapi->rx_rcb_mapping,
6657 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6663 tg3_free_consistent(tp);
6667 #define MAX_WAIT_CNT 1000
6669 /* To stop a block, clear the enable bit and poll till it
6670 * clears. tp->lock is held.
6672 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6677 if (tg3_flag(tp, 5705_PLUS)) {
6684 /* We can't enable/disable these bits of the
6685 * 5705/5750; just say success.
6698 for (i = 0; i < MAX_WAIT_CNT; i++) {
6701 if ((val & enable_bit) == 0)
6705 if (i == MAX_WAIT_CNT && !silent) {
6706 dev_err(&tp->pdev->dev,
6707 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6715 /* tp->lock is held. */
6716 static int tg3_abort_hw(struct tg3 *tp, int silent)
6720 tg3_disable_ints(tp);
6722 tp->rx_mode &= ~RX_MODE_ENABLE;
6723 tw32_f(MAC_RX_MODE, tp->rx_mode);
6726 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6727 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6728 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6729 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6730 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6731 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6733 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6734 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6735 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6736 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6737 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6738 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6739 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6741 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6742 tw32_f(MAC_MODE, tp->mac_mode);
6745 tp->tx_mode &= ~TX_MODE_ENABLE;
6746 tw32_f(MAC_TX_MODE, tp->tx_mode);
6748 for (i = 0; i < MAX_WAIT_CNT; i++) {
6750 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6753 if (i >= MAX_WAIT_CNT) {
6754 dev_err(&tp->pdev->dev,
6755 "%s timed out, TX_MODE_ENABLE will not clear "
6756 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6760 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6761 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6762 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6764 tw32(FTQ_RESET, 0xffffffff);
6765 tw32(FTQ_RESET, 0x00000000);
6767 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6768 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6770 for (i = 0; i < tp->irq_cnt; i++) {
6771 struct tg3_napi *tnapi = &tp->napi[i];
6772 if (tnapi->hw_status)
6773 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6776 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6781 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6786 /* NCSI does not support APE events */
6787 if (tg3_flag(tp, APE_HAS_NCSI))
6790 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6791 if (apedata != APE_SEG_SIG_MAGIC)
6794 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6795 if (!(apedata & APE_FW_STATUS_READY))
6798 /* Wait for up to 1 millisecond for APE to service previous event. */
6799 for (i = 0; i < 10; i++) {
6800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6805 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6806 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6807 event | APE_EVENT_STATUS_EVENT_PENDING);
6809 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6811 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6817 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6818 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6821 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6826 if (!tg3_flag(tp, ENABLE_APE))
6830 case RESET_KIND_INIT:
6831 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6832 APE_HOST_SEG_SIG_MAGIC);
6833 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6834 APE_HOST_SEG_LEN_MAGIC);
6835 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6836 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6837 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6838 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6839 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6840 APE_HOST_BEHAV_NO_PHYLOCK);
6841 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6842 TG3_APE_HOST_DRVR_STATE_START);
6844 event = APE_EVENT_STATUS_STATE_START;
6846 case RESET_KIND_SHUTDOWN:
6847 /* With the interface we are currently using,
6848 * APE does not track driver state. Wiping
6849 * out the HOST SEGMENT SIGNATURE forces
6850 * the APE to assume OS absent status.
6852 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6854 if (device_may_wakeup(&tp->pdev->dev) &&
6855 tg3_flag(tp, WOL_ENABLE)) {
6856 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6857 TG3_APE_HOST_WOL_SPEED_AUTO);
6858 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6860 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6862 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6864 event = APE_EVENT_STATUS_STATE_UNLOAD;
6866 case RESET_KIND_SUSPEND:
6867 event = APE_EVENT_STATUS_STATE_SUSPEND;
6873 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6875 tg3_ape_send_event(tp, event);
6878 /* tp->lock is held. */
6879 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6881 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6882 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6884 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6886 case RESET_KIND_INIT:
6887 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6891 case RESET_KIND_SHUTDOWN:
6892 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6896 case RESET_KIND_SUSPEND:
6897 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6906 if (kind == RESET_KIND_INIT ||
6907 kind == RESET_KIND_SUSPEND)
6908 tg3_ape_driver_state_change(tp, kind);
6911 /* tp->lock is held. */
6912 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6914 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6916 case RESET_KIND_INIT:
6917 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6918 DRV_STATE_START_DONE);
6921 case RESET_KIND_SHUTDOWN:
6922 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6923 DRV_STATE_UNLOAD_DONE);
6931 if (kind == RESET_KIND_SHUTDOWN)
6932 tg3_ape_driver_state_change(tp, kind);
6935 /* tp->lock is held. */
6936 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6938 if (tg3_flag(tp, ENABLE_ASF)) {
6940 case RESET_KIND_INIT:
6941 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6945 case RESET_KIND_SHUTDOWN:
6946 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6950 case RESET_KIND_SUSPEND:
6951 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6961 static int tg3_poll_fw(struct tg3 *tp)
6966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6967 /* Wait up to 20ms for init done. */
6968 for (i = 0; i < 200; i++) {
6969 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6976 /* Wait for firmware initialization to complete. */
6977 for (i = 0; i < 100000; i++) {
6978 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6979 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6984 /* Chip might not be fitted with firmware. Some Sun onboard
6985 * parts are configured like that. So don't signal the timeout
6986 * of the above loop as an error, but do report the lack of
6987 * running firmware once.
6989 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6990 tg3_flag_set(tp, NO_FWARE_REPORTED);
6992 netdev_info(tp->dev, "No firmware running\n");
6995 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6996 /* The 57765 A0 needs a little more
6997 * time to do some important work.
7005 /* Save PCI command register before chip reset */
7006 static void tg3_save_pci_state(struct tg3 *tp)
7008 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7011 /* Restore PCI state after chip reset */
7012 static void tg3_restore_pci_state(struct tg3 *tp)
7016 /* Re-enable indirect register accesses. */
7017 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7018 tp->misc_host_ctrl);
7020 /* Set MAX PCI retry to zero. */
7021 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7022 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7023 tg3_flag(tp, PCIX_MODE))
7024 val |= PCISTATE_RETRY_SAME_DMA;
7025 /* Allow reads and writes to the APE register and memory space. */
7026 if (tg3_flag(tp, ENABLE_APE))
7027 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7028 PCISTATE_ALLOW_APE_SHMEM_WR |
7029 PCISTATE_ALLOW_APE_PSPACE_WR;
7030 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7032 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7034 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7035 if (tg3_flag(tp, PCI_EXPRESS))
7036 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7038 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7039 tp->pci_cacheline_sz);
7040 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7045 /* Make sure PCI-X relaxed ordering bit is clear. */
7046 if (tg3_flag(tp, PCIX_MODE)) {
7049 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7051 pcix_cmd &= ~PCI_X_CMD_ERO;
7052 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7056 if (tg3_flag(tp, 5780_CLASS)) {
7058 /* Chip reset on 5780 will reset MSI enable bit,
7059 * so need to restore it.
7061 if (tg3_flag(tp, USING_MSI)) {
7064 pci_read_config_word(tp->pdev,
7065 tp->msi_cap + PCI_MSI_FLAGS,
7067 pci_write_config_word(tp->pdev,
7068 tp->msi_cap + PCI_MSI_FLAGS,
7069 ctrl | PCI_MSI_FLAGS_ENABLE);
7070 val = tr32(MSGINT_MODE);
7071 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7076 static void tg3_stop_fw(struct tg3 *);
7078 /* tp->lock is held. */
7079 static int tg3_chip_reset(struct tg3 *tp)
7082 void (*write_op)(struct tg3 *, u32, u32);
7087 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7089 /* No matching tg3_nvram_unlock() after this because
7090 * chip reset below will undo the nvram lock.
7092 tp->nvram_lock_cnt = 0;
7094 /* GRC_MISC_CFG core clock reset will clear the memory
7095 * enable bit in PCI register 4 and the MSI enable bit
7096 * on some chips, so we save relevant registers here.
7098 tg3_save_pci_state(tp);
7100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7101 tg3_flag(tp, 5755_PLUS))
7102 tw32(GRC_FASTBOOT_PC, 0);
	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
7110 write_op = tp->write32;
7111 if (write_op == tg3_write_flush_reg32)
7112 tp->write32 = tg3_write32;
7114 /* Prevent the irq handler from reading or writing PCI registers
7115 * during chip reset when the memory enable bit in the PCI command
7116 * register may be cleared. The chip does not generate interrupt
7117 * at this time, but the irq handler may still be called due to irq
7118 * sharing or irqpoll.
7120 tg3_flag_set(tp, CHIP_RESETTING);
7121 for (i = 0; i < tp->irq_cnt; i++) {
7122 struct tg3_napi *tnapi = &tp->napi[i];
7123 if (tnapi->hw_status) {
7124 tnapi->hw_status->status = 0;
7125 tnapi->hw_status->status_tag = 0;
7127 tnapi->last_tag = 0;
7128 tnapi->last_irq_tag = 0;
7132 for (i = 0; i < tp->irq_cnt; i++)
7133 synchronize_irq(tp->napi[i].irq_vec);
7135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7136 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7137 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7141 val = GRC_MISC_CFG_CORECLK_RESET;
7143 if (tg3_flag(tp, PCI_EXPRESS)) {
7144 /* Force PCIe 1.0a mode */
7145 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7146 !tg3_flag(tp, 57765_PLUS) &&
7147 tr32(TG3_PCIE_PHY_TSTCTL) ==
7148 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7149 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7151 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7152 tw32(GRC_MISC_CFG, (1 << 29));
7157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7158 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7159 tw32(GRC_VCPU_EXT_CTRL,
7160 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7163 /* Manage gphy power for all CPMU absent PCIe devices. */
7164 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7165 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7167 tw32(GRC_MISC_CFG, val);
7169 /* restore 5701 hardware bug workaround write method */
7170 tp->write32 = write_op;
	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Give it some more time to work.
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
7193 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7197 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7200 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7204 /* Wait for link training to complete. */
7205 for (i = 0; i < 5000; i++)
7208 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7209 pci_write_config_dword(tp->pdev, 0xc4,
7210 cfg_val | (1 << 15));
7213 /* Clear the "no snoop" and "relaxed ordering" bits. */
7214 pci_read_config_word(tp->pdev,
7215 tp->pcie_cap + PCI_EXP_DEVCTL,
7217 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7218 PCI_EXP_DEVCTL_NOSNOOP_EN);
7220 * Older PCIe devices only support the 128 byte
7221 * MPS setting. Enforce the restriction.
7223 if (!tg3_flag(tp, CPMU_PRESENT))
7224 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7225 pci_write_config_word(tp->pdev,
7226 tp->pcie_cap + PCI_EXP_DEVCTL,
7229 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7231 /* Clear error status */
7232 pci_write_config_word(tp->pdev,
7233 tp->pcie_cap + PCI_EXP_DEVSTA,
7234 PCI_EXP_DEVSTA_CED |
7235 PCI_EXP_DEVSTA_NFED |
7236 PCI_EXP_DEVSTA_FED |
7237 PCI_EXP_DEVSTA_URD);
7240 tg3_restore_pci_state(tp);
7242 tg3_flag_clear(tp, CHIP_RESETTING);
7243 tg3_flag_clear(tp, ERROR_PROCESSED);
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7250 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7252 tw32(0x5000, 0x400);
7255 tw32(GRC_MODE, tp->grc_mode);
7257 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7260 tw32(0xc4, val | (1 << 15));
7263 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7265 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7266 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7267 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7268 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7271 if (tg3_flag(tp, ENABLE_APE))
7272 tp->mac_mode = MAC_MODE_APE_TX_EN |
7273 MAC_MODE_APE_RX_EN |
7274 MAC_MODE_TDE_ENABLE;
7276 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7277 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7279 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7280 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7285 tw32_f(MAC_MODE, val);
7288 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7290 err = tg3_poll_fw(tp);
7296 if (tg3_flag(tp, PCI_EXPRESS) &&
7297 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7298 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7299 !tg3_flag(tp, 57765_PLUS)) {
7302 tw32(0x7c00, val | (1 << 25));
7305 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7306 val = tr32(TG3_CPMU_CLCK_ORIDE);
7307 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7310 /* Reprobe ASF enable state. */
7311 tg3_flag_clear(tp, ENABLE_ASF);
7312 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7313 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7314 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7317 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7318 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7319 tg3_flag_set(tp, ENABLE_ASF);
7320 tp->last_event_jiffies = jiffies;
7321 if (tg3_flag(tp, 5750_PLUS))
7322 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);
	__tg3_set_mac_addr(tp, 0);
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
7368 #define RX_CPU_SCRATCH_BASE 0x30000
7369 #define RX_CPU_SCRATCH_SIZE 0x04000
7370 #define TX_CPU_SCRATCH_BASE 0x34000
7371 #define TX_CPU_SCRATCH_SIZE 0x04000
7373 /* tp->lock is held. */
7374 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7378 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7381 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7383 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7386 if (offset == RX_CPU_BASE) {
7387 for (i = 0; i < 10000; i++) {
7388 tw32(offset + CPU_STATE, 0xffffffff);
7389 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7390 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7394 tw32(offset + CPU_STATE, 0xffffffff);
7395 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7398 for (i = 0; i < 10000; i++) {
7399 tw32(offset + CPU_STATE, 0xffffffff);
7400 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7401 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7407 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7408 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7412 /* Clear firmware's nvram arbitration. */
7413 if (tg3_flag(tp, NVRAM))
7414 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
struct fw_info {
	unsigned int fw_base;	/* start/load address of the firmware */
	unsigned int fw_len;	/* length in bytes of the data to load */
	const __be32 *fw_data;	/* big-endian firmware words */
};
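/*
 * Illustrative sketch, not part of the driver: how the firmware blob
 * layout described in the loaders below maps onto struct fw_info.
 * The helper name tg3_fw_info_from_blob() is hypothetical.
 */
static inline void tg3_fw_info_from_blob(struct fw_info *info,
					 const struct firmware *fw)
{
	const __be32 *fw_data = (const __be32 *)fw->data;

	/* Word 0 holds version numbers, word 1 the load address,
	 * word 2 the full length including BSS; payload starts at word 3.
	 */
	info->fw_base = be32_to_cpu(fw_data[1]);
	info->fw_len = fw->size - 12;	/* strip the 3-word header */
	info->fw_data = &fw_data[3];
}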
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;
7443 /* It is possible that bootcode is still loading at this point.
7444 * Get the nvram lock first before halting the cpu.
7446 lock_err = tg3_nvram_lock(tp);
7447 err = tg3_halt_cpu(tp, cpu_base);
7449 tg3_nvram_unlock(tp);
7453 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7454 write_op(tp, cpu_scratch_base + i, 0);
7455 tw32(cpu_base + CPU_STATE, 0xffffffff);
7456 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7457 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7458 write_op(tp, (cpu_scratch_base +
7459 (info->fw_base & 0xffff) +
7461 be32_to_cpu(info->fw_data[i]));
7469 /* tp->lock is held. */
7470 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7472 struct fw_info info;
7473 const __be32 *fw_data;
7476 fw_data = (void *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
7484 info.fw_base = be32_to_cpu(fw_data[1]);
7485 info.fw_len = tp->fw->size - 12;
7486 info.fw_data = &fw_data[3];
7488 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7489 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7494 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7495 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7500 /* Now startup only the RX cpu. */
7501 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7502 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7504 for (i = 0; i < 5; i++) {
7505 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7507 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7508 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7509 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7513 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7514 "should be %08x\n", __func__,
7515 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7518 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7519 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7524 /* tp->lock is held. */
7525 static int tg3_load_tso_firmware(struct tg3 *tp)
7527 struct fw_info info;
7528 const __be32 *fw_data;
7529 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7532 if (tg3_flag(tp, HW_TSO_1) ||
7533 tg3_flag(tp, HW_TSO_2) ||
7534 tg3_flag(tp, HW_TSO_3))
7537 fw_data = (void *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
7545 info.fw_base = be32_to_cpu(fw_data[1]);
7546 cpu_scratch_size = tp->fw_len;
7547 info.fw_len = tp->fw->size - 12;
7548 info.fw_data = &fw_data[3];
7550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7551 cpu_base = RX_CPU_BASE;
7552 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7554 cpu_base = TX_CPU_BASE;
7555 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7556 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7559 err = tg3_load_firmware_cpu(tp, cpu_base,
7560 cpu_scratch_base, cpu_scratch_size,
7565 /* Now startup the cpu. */
7566 tw32(cpu_base + CPU_STATE, 0xffffffff);
7567 tw32_f(cpu_base + CPU_PC, info.fw_base);
7569 for (i = 0; i < 5; i++) {
7570 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7572 tw32(cpu_base + CPU_STATE, 0xffffffff);
7573 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7574 tw32_f(cpu_base + CPU_PC, info.fw_base);
7579 "%s fails to set CPU PC, is %08x should be %08x\n",
7580 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7583 tw32(cpu_base + CPU_STATE, 0xffffffff);
7584 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7589 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7591 struct tg3 *tp = netdev_priv(dev);
7592 struct sockaddr *addr = p;
7593 int err = 0, skip_mac_1 = 0;
7595 if (!is_valid_ether_addr(addr->sa_data))
7598 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7600 if (!netif_running(dev))
7603 if (tg3_flag(tp, ENABLE_ASF)) {
7604 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7606 addr0_high = tr32(MAC_ADDR_0_HIGH);
7607 addr0_low = tr32(MAC_ADDR_0_LOW);
7608 addr1_high = tr32(MAC_ADDR_1_HIGH);
7609 addr1_low = tr32(MAC_ADDR_1_LOW);
7611 /* Skip MAC addr 1 if ASF is using it. */
7612 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7613 !(addr1_high == 0 && addr1_low == 0))
7616 spin_lock_bh(&tp->lock);
7617 __tg3_set_mac_addr(tp, skip_mac_1);
7618 spin_unlock_bh(&tp->lock);
7623 /* tp->lock is held. */
7624 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7625 dma_addr_t mapping, u32 maxlen_flags,
7629 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7630 ((u64) mapping >> 32));
7632 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7633 ((u64) mapping & 0xffffffff));
7635 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7638 if (!tg3_flag(tp, 5705_PLUS))
7640 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
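/*
 * Layout sketch (per the TG3_BDINFO_* offsets in tg3.h, quoted here
 * for illustration): each ring control block written above is a
 * 16-byte record in NIC SRAM,
 *
 *	+0x0	host ring DMA address, high 32 bits
 *	+0x4	host ring DMA address, low 32 bits
 *	+0x8	(max frame length << 16) | ring attribute flags
 *	+0xc	ring address in NIC SRAM (pre-5705 chips only)
 */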
7644 static void __tg3_set_rx_mode(struct net_device *);
7645 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7649 if (!tg3_flag(tp, ENABLE_TSS)) {
7650 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7651 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7652 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7654 tw32(HOSTCC_TXCOL_TICKS, 0);
7655 tw32(HOSTCC_TXMAX_FRAMES, 0);
7656 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7659 if (!tg3_flag(tp, ENABLE_RSS)) {
7660 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7661 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7662 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7664 tw32(HOSTCC_RXCOL_TICKS, 0);
7665 tw32(HOSTCC_RXMAX_FRAMES, 0);
7666 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7669 if (!tg3_flag(tp, 5705_PLUS)) {
7670 u32 val = ec->stats_block_coalesce_usecs;
7672 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7673 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7675 if (!netif_carrier_ok(tp->dev))
7678 tw32(HOSTCC_STAT_COAL_TICKS, val);
7681 for (i = 0; i < tp->irq_cnt - 1; i++) {
7684 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7685 tw32(reg, ec->rx_coalesce_usecs);
7686 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7687 tw32(reg, ec->rx_max_coalesced_frames);
7688 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7689 tw32(reg, ec->rx_max_coalesced_frames_irq);
7691 if (tg3_flag(tp, ENABLE_TSS)) {
7692 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7693 tw32(reg, ec->tx_coalesce_usecs);
7694 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7695 tw32(reg, ec->tx_max_coalesced_frames);
7696 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7697 tw32(reg, ec->tx_max_coalesced_frames_irq);
7701 for (; i < tp->irq_max - 1; i++) {
7702 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7703 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7704 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7706 if (tg3_flag(tp, ENABLE_TSS)) {
7707 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7708 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7709 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
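/*
 * Worked example (illustrative): the per-vector coalescing registers
 * are spaced 0x18 bytes apart, so for i = 2 the writes above land at
 * HOSTCC_RXCOL_TICKS_VEC1 + 0x30 and friends, i.e. the registers of
 * the third rx MSI-X vector.
 */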
7714 /* tp->lock is held. */
7715 static void tg3_rings_reset(struct tg3 *tp)
7718 u32 stblk, txrcb, rxrcb, limit;
7719 struct tg3_napi *tnapi = &tp->napi[0];
7721 /* Disable all transmit rings but the first. */
7722 if (!tg3_flag(tp, 5705_PLUS))
7723 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7724 else if (tg3_flag(tp, 5717_PLUS))
7725 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7726 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7727 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7729 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7731 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7732 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7733 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7734 BDINFO_FLAGS_DISABLED);
7737 /* Disable all receive return rings but the first. */
7738 if (tg3_flag(tp, 5717_PLUS))
7739 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7740 else if (!tg3_flag(tp, 5705_PLUS))
7741 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7742 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7744 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7746 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7748 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7749 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7750 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7751 BDINFO_FLAGS_DISABLED);
7753 /* Disable interrupts */
7754 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7756 /* Zero mailbox registers. */
7757 if (tg3_flag(tp, SUPPORT_MSIX)) {
7758 for (i = 1; i < tp->irq_max; i++) {
7759 tp->napi[i].tx_prod = 0;
7760 tp->napi[i].tx_cons = 0;
7761 if (tg3_flag(tp, ENABLE_TSS))
7762 tw32_mailbox(tp->napi[i].prodmbox, 0);
7763 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7764 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7766 if (!tg3_flag(tp, ENABLE_TSS))
7767 tw32_mailbox(tp->napi[0].prodmbox, 0);
7769 tp->napi[0].tx_prod = 0;
7770 tp->napi[0].tx_cons = 0;
7771 tw32_mailbox(tp->napi[0].prodmbox, 0);
7772 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7775 /* Make sure the NIC-based send BD rings are disabled. */
7776 if (!tg3_flag(tp, 5705_PLUS)) {
7777 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7778 for (i = 0; i < 16; i++)
7779 tw32_tx_mbox(mbox + i * 8, 0);
7782 txrcb = NIC_SRAM_SEND_RCB;
7783 rxrcb = NIC_SRAM_RCV_RET_RCB;
7785 /* Clear status block in ram. */
7786 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7788 /* Set status block DMA address */
7789 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7790 ((u64) tnapi->status_mapping >> 32));
7791 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7792 ((u64) tnapi->status_mapping & 0xffffffff));
7794 if (tnapi->tx_ring) {
7795 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7796 (TG3_TX_RING_SIZE <<
7797 BDINFO_FLAGS_MAXLEN_SHIFT),
7798 NIC_SRAM_TX_BUFFER_DESC);
7799 txrcb += TG3_BDINFO_SIZE;
7802 if (tnapi->rx_rcb) {
7803 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7804 (tp->rx_ret_ring_mask + 1) <<
7805 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7806 rxrcb += TG3_BDINFO_SIZE;
7809 stblk = HOSTCC_STATBLCK_RING1;
7811 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7812 u64 mapping = (u64)tnapi->status_mapping;
7813 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7814 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7816 /* Clear status block in ram. */
7817 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7819 if (tnapi->tx_ring) {
7820 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7821 (TG3_TX_RING_SIZE <<
7822 BDINFO_FLAGS_MAXLEN_SHIFT),
7823 NIC_SRAM_TX_BUFFER_DESC);
7824 txrcb += TG3_BDINFO_SIZE;
7827 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7828 ((tp->rx_ret_ring_mask + 1) <<
7829 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7832 rxrcb += TG3_BDINFO_SIZE;
7836 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7838 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7840 if (!tg3_flag(tp, 5750_PLUS) ||
7841 tg3_flag(tp, 5780_CLASS) ||
7842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7843 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7844 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7845 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7847 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7849 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7851 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7852 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7854 val = min(nic_rep_thresh, host_rep_thresh);
7855 tw32(RCVBDI_STD_THRESH, val);
7857 if (tg3_flag(tp, 57765_PLUS))
7858 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7860 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7863 if (!tg3_flag(tp, 5705_PLUS))
7864 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7866 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7868 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7870 val = min(bdcache_maxcnt / 2, host_rep_thresh);
7871 tw32(RCVBDI_JUMBO_THRESH, val);
7873 if (tg3_flag(tp, 57765_PLUS))
7874 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
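/*
 * Worked example (illustrative numbers): with tp->rx_pending = 200,
 * host_rep_thresh = max_t(u32, 200 / 8, 1) = 25, so the standard ring
 * threshold programmed above is min(bdcache_maxcnt / 2,
 * tp->rx_std_max_post, 25).
 */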
7877 /* tp->lock is held. */
7878 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7880 u32 val, rdmac_mode;
7882 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7884 tg3_disable_ints(tp);
7888 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7890 if (tg3_flag(tp, INIT_COMPLETE))
7891 tg3_abort_hw(tp, 1);
7893 /* Enable MAC control of LPI */
7894 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7895 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7896 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7897 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7899 tw32_f(TG3_CPMU_EEE_CTRL,
7900 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7902 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7903 TG3_CPMU_EEEMD_LPI_IN_TX |
7904 TG3_CPMU_EEEMD_LPI_IN_RX |
7905 TG3_CPMU_EEEMD_EEE_ENABLE;
7907 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7908 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7910 if (tg3_flag(tp, ENABLE_APE))
7911 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7913 tw32_f(TG3_CPMU_EEE_MODE, val);
7915 tw32_f(TG3_CPMU_EEE_DBTMR1,
7916 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7917 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7919 tw32_f(TG3_CPMU_EEE_DBTMR2,
7920 TG3_CPMU_DBTMR2_APE_TX_2047US |
7921 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7927 err = tg3_chip_reset(tp);
7931 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7933 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7934 val = tr32(TG3_CPMU_CTRL);
7935 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7936 tw32(TG3_CPMU_CTRL, val);
7938 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7939 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7940 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7941 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7943 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7944 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7945 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7946 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7948 val = tr32(TG3_CPMU_HST_ACC);
7949 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7950 val |= CPMU_HST_ACC_MACCLK_6_25;
7951 tw32(TG3_CPMU_HST_ACC, val);
7954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7955 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7956 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7957 PCIE_PWR_MGMT_L1_THRESH_4MS;
7958 tw32(PCIE_PWR_MGMT_THRESH, val);
7960 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7961 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7963 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7965 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7966 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7969 if (tg3_flag(tp, L1PLLPD_EN)) {
7970 u32 grc_mode = tr32(GRC_MODE);
7972 /* Access the lower 1K of PL PCIE block registers. */
7973 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7974 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7976 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7977 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7978 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7980 tw32(GRC_MODE, grc_mode);
7983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7984 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7985 u32 grc_mode = tr32(GRC_MODE);
7987 /* Access the lower 1K of PL PCIE block registers. */
7988 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7989 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7991 val = tr32(TG3_PCIE_TLDLPL_PORT +
7992 TG3_PCIE_PL_LO_PHYCTL5);
7993 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7994 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7996 tw32(GRC_MODE, grc_mode);
7999 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8000 u32 grc_mode = tr32(GRC_MODE);
8002 /* Access the lower 1K of DL PCIE block registers. */
8003 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8004 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8006 val = tr32(TG3_PCIE_TLDLPL_PORT +
8007 TG3_PCIE_DL_LO_FTSMAX);
8008 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8009 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8010 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8012 tw32(GRC_MODE, grc_mode);
8015 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8016 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8017 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8018 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8021 /* This works around an issue with Athlon chipsets on
8022 * B3 tigon3 silicon. This bit has no effect on any
8023 * other revision. But do not set this on PCI Express
8024 * chips and don't even touch the clocks if the CPMU is present.
8026 if (!tg3_flag(tp, CPMU_PRESENT)) {
8027 if (!tg3_flag(tp, PCI_EXPRESS))
8028 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8029 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8032 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8033 tg3_flag(tp, PCIX_MODE)) {
8034 val = tr32(TG3PCI_PCISTATE);
8035 val |= PCISTATE_RETRY_SAME_DMA;
8036 tw32(TG3PCI_PCISTATE, val);
8039 if (tg3_flag(tp, ENABLE_APE)) {
8040 /* Allow reads and writes to the
8041 * APE register and memory space.
8043 val = tr32(TG3PCI_PCISTATE);
8044 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8045 PCISTATE_ALLOW_APE_SHMEM_WR |
8046 PCISTATE_ALLOW_APE_PSPACE_WR;
8047 tw32(TG3PCI_PCISTATE, val);
8050 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8051 /* Enable some hw fixes. */
8052 val = tr32(TG3PCI_MSI_DATA);
8053 val |= (1 << 26) | (1 << 28) | (1 << 29);
8054 tw32(TG3PCI_MSI_DATA, val);
8057 /* Descriptor ring init may make accesses to the
8058 * NIC SRAM area to setup the TX descriptors, so we
8059 * can only do this after the hardware has been
8060 * successfully reset.
8062 err = tg3_init_rings(tp);
8066 if (tg3_flag(tp, 57765_PLUS)) {
8067 val = tr32(TG3PCI_DMA_RW_CTRL) &
8068 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8069 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8070 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8071 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8072 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8073 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8074 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8075 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8076 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8077 /* This value is determined during the probe time DMA
8078 * engine test, tg3_test_dma.
8080 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8083 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8084 GRC_MODE_4X_NIC_SEND_RINGS |
8085 GRC_MODE_NO_TX_PHDR_CSUM |
8086 GRC_MODE_NO_RX_PHDR_CSUM);
8087 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
8095 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8099 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
8102 val = tr32(GRC_MISC_CFG);
8104 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8105 tw32(GRC_MISC_CFG, val);
8107 /* Initialize MBUF/DESC pool. */
8108 if (tg3_flag(tp, 5750_PLUS)) {
8110 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8111 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8113 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8115 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8116 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8117 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8118 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8121 fw_len = tp->fw_len;
8122 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8123 tw32(BUFMGR_MB_POOL_ADDR,
8124 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8125 tw32(BUFMGR_MB_POOL_SIZE,
8126 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8129 if (tp->dev->mtu <= ETH_DATA_LEN) {
8130 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8131 tp->bufmgr_config.mbuf_read_dma_low_water);
8132 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8133 tp->bufmgr_config.mbuf_mac_rx_low_water);
8134 tw32(BUFMGR_MB_HIGH_WATER,
8135 tp->bufmgr_config.mbuf_high_water);
8137 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8138 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8139 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8140 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8141 tw32(BUFMGR_MB_HIGH_WATER,
8142 tp->bufmgr_config.mbuf_high_water_jumbo);
8144 tw32(BUFMGR_DMA_LOW_WATER,
8145 tp->bufmgr_config.dma_low_water);
8146 tw32(BUFMGR_DMA_HIGH_WATER,
8147 tp->bufmgr_config.dma_high_water);
8149 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8151 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8153 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8154 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8155 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8156 tw32(BUFMGR_MODE, val);
8157 for (i = 0; i < 2000; i++) {
8158 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8163 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8167 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8168 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8170 tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
8189 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8190 ((u64) tpr->rx_std_mapping >> 32));
8191 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8192 ((u64) tpr->rx_std_mapping & 0xffffffff));
8193 if (!tg3_flag(tp, 5717_PLUS))
8194 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8195 NIC_SRAM_RX_BUFFER_DESC);
8197 /* Disable the mini ring */
8198 if (!tg3_flag(tp, 5705_PLUS))
8199 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8200 BDINFO_FLAGS_DISABLED);
8202 /* Program the jumbo buffer descriptor ring control
8203 * blocks on those devices that have them.
8205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8206 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8208 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8209 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8210 ((u64) tpr->rx_jmb_mapping >> 32));
8211 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8212 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8213 val = TG3_RX_JMB_RING_SIZE(tp) <<
8214 BDINFO_FLAGS_MAXLEN_SHIFT;
8215 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8216 val | BDINFO_FLAGS_USE_EXT_RECV);
8217 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8219 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8220 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8222 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8223 BDINFO_FLAGS_DISABLED);
8226 if (tg3_flag(tp, 57765_PLUS)) {
8227 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8228 val = TG3_RX_STD_MAX_SIZE_5700;
8230 val = TG3_RX_STD_MAX_SIZE_5717;
8231 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8232 val |= (TG3_RX_STD_DMA_SZ << 2);
8234 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8236 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8238 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8240 tpr->rx_std_prod_idx = tp->rx_pending;
8241 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8243 tpr->rx_jmb_prod_idx =
8244 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8245 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8247 tg3_rings_reset(tp);
8249 /* Initialize MAC address and backoff seed. */
8250 __tg3_set_mac_addr(tp, 0);
8252 /* MTU + ethernet header + FCS + optional VLAN tag */
8253 tw32(MAC_RX_MTU_SIZE,
8254 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
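	/* Worked example: with the default mtu of 1500 this programs
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522.
	 */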
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
8259 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8260 (6 << TX_LENGTHS_IPG_SHIFT) |
8261 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8264 val |= tr32(MAC_TX_LENGTHS) &
8265 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8266 TX_LENGTHS_CNT_DWN_VAL_MSK);
8268 tw32(MAC_TX_LENGTHS, val);
8270 /* Receive rules. */
8271 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8272 tw32(RCVLPC_CONFIG, 0x0181);
8274 /* Calculate RDMAC_MODE setting early, we need it to determine
8275 * the RCVLPC_STATE_ENABLE mask.
8277 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8278 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8279 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8280 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8281 RDMAC_MODE_LNGREAD_ENAB);
8283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8284 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8289 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8290 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8291 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8293 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8294 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8295 if (tg3_flag(tp, TSO_CAPABLE) &&
8296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8297 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8298 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8299 !tg3_flag(tp, IS_5788)) {
8300 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8304 if (tg3_flag(tp, PCI_EXPRESS))
8305 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8307 if (tg3_flag(tp, HW_TSO_1) ||
8308 tg3_flag(tp, HW_TSO_2) ||
8309 tg3_flag(tp, HW_TSO_3))
8310 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8312 if (tg3_flag(tp, 57765_PLUS) ||
8313 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8314 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8315 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8318 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8322 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8323 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8324 tg3_flag(tp, 57765_PLUS)) {
8325 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8328 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8329 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8330 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8331 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8332 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8333 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8335 tw32(TG3_RDMA_RSRVCTRL_REG,
8336 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8339 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8340 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8341 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8342 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8343 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8344 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8347 /* Receive/send statistics. */
8348 if (tg3_flag(tp, 5750_PLUS)) {
8349 val = tr32(RCVLPC_STATS_ENABLE);
8350 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8351 tw32(RCVLPC_STATS_ENABLE, val);
8352 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8353 tg3_flag(tp, TSO_CAPABLE)) {
8354 val = tr32(RCVLPC_STATS_ENABLE);
8355 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8356 tw32(RCVLPC_STATS_ENABLE, val);
8358 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8360 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8361 tw32(SNDDATAI_STATSENAB, 0xffffff);
8362 tw32(SNDDATAI_STATSCTRL,
8363 (SNDDATAI_SCTRL_ENABLE |
8364 SNDDATAI_SCTRL_FASTUPD));
8366 /* Setup host coalescing engine. */
8367 tw32(HOSTCC_MODE, 0);
8368 for (i = 0; i < 2000; i++) {
8369 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8374 __tg3_set_coalesce(tp, &tp->coal);
8376 if (!tg3_flag(tp, 5705_PLUS)) {
8377 /* Status/statistics block address. See tg3_timer,
8378 * the tg3_periodic_fetch_stats call there, and
8379 * tg3_get_stats to see how this works for 5705/5750 chips.
8381 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8382 ((u64) tp->stats_mapping >> 32));
8383 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8384 ((u64) tp->stats_mapping & 0xffffffff));
8385 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8387 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8389 /* Clear statistics and status block memory areas */
8390 for (i = NIC_SRAM_STATS_BLK;
8391 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8393 tg3_write_mem(tp, i, 0);
8398 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8400 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8401 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8402 if (!tg3_flag(tp, 5705_PLUS))
8403 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8405 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8406 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8407 /* reset to prevent losing 1st rx packet intermittently */
8408 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8412 if (tg3_flag(tp, ENABLE_APE))
8413 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8416 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8417 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8418 if (!tg3_flag(tp, 5705_PLUS) &&
8419 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8420 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8421 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8422 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs.  The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
8431 if (!tg3_flag(tp, IS_NIC)) {
8434 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8435 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8436 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8439 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8440 GRC_LCLCTRL_GPIO_OUTPUT3;
8442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8443 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8445 tp->grc_local_ctrl &= ~gpio_mask;
8446 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8448 /* GPIO1 must be driven high for eeprom write protect */
8449 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8450 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8451 GRC_LCLCTRL_GPIO_OUTPUT1);
8453 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8456 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8457 val = tr32(MSGINT_MODE);
8458 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8459 tw32(MSGINT_MODE, val);
8462 if (!tg3_flag(tp, 5705_PLUS)) {
8463 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8467 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8468 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8469 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8470 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8471 WDMAC_MODE_LNGREAD_ENAB);
8473 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8474 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8475 if (tg3_flag(tp, TSO_CAPABLE) &&
8476 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8477 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8479 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8480 !tg3_flag(tp, IS_5788)) {
8481 val |= WDMAC_MODE_RX_ACCEL;
8485 /* Enable host coalescing bug fix */
8486 if (tg3_flag(tp, 5755_PLUS))
8487 val |= WDMAC_MODE_STATUS_TAG_FIX;
8489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8490 val |= WDMAC_MODE_BURST_ALL_DATA;
8492 tw32_f(WDMAC_MODE, val);
8495 if (tg3_flag(tp, PCIX_MODE)) {
8498 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8500 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8501 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8502 pcix_cmd |= PCI_X_CMD_READ_2K;
8503 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8504 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8505 pcix_cmd |= PCI_X_CMD_READ_2K;
8507 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8511 tw32_f(RDMAC_MODE, rdmac_mode);
8514 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8515 if (!tg3_flag(tp, 5705_PLUS))
8516 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8520 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8522 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8524 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8525 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8526 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8527 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8528 val |= RCVDBDI_MODE_LRG_RING_SZ;
8529 tw32(RCVDBDI_MODE, val);
8530 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8531 if (tg3_flag(tp, HW_TSO_1) ||
8532 tg3_flag(tp, HW_TSO_2) ||
8533 tg3_flag(tp, HW_TSO_3))
8534 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8535 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8536 if (tg3_flag(tp, ENABLE_TSS))
8537 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8538 tw32(SNDBDI_MODE, val);
8539 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8541 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8542 err = tg3_load_5701_a0_firmware_fix(tp);
8547 if (tg3_flag(tp, TSO_CAPABLE)) {
8548 err = tg3_load_tso_firmware(tp);
8553 tp->tx_mode = TX_MODE_ENABLE;
8555 if (tg3_flag(tp, 5755_PLUS) ||
8556 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8557 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8559 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8560 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8561 tp->tx_mode &= ~val;
8562 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8565 tw32_f(MAC_TX_MODE, tp->tx_mode);
8568 if (tg3_flag(tp, ENABLE_RSS)) {
8569 u32 reg = MAC_RSS_INDIR_TBL_0;
8570 u8 *ent = (u8 *)&val;
		/* Setup the indirection table */
		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
			int idx = i % sizeof(val);

			ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}
8583 /* Setup the "secret" hash key. */
8584 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8585 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8586 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8587 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8588 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8589 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8590 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8591 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8592 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8593 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
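		/* Worked example (illustrative): with tp->irq_cnt = 5 (one
		 * link vector plus four rx rings), the indirection entries
		 * above cycle 0, 1, 2, 3, 0, 1, ... across all 128 slots,
		 * packed four 8-bit entries per 32-bit register write.
		 */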
8596 tp->rx_mode = RX_MODE_ENABLE;
8597 if (tg3_flag(tp, 5755_PLUS))
8598 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8600 if (tg3_flag(tp, ENABLE_RSS))
8601 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8602 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8603 RX_MODE_RSS_IPV6_HASH_EN |
8604 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8605 RX_MODE_RSS_IPV4_HASH_EN |
8606 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8608 tw32_f(MAC_RX_MODE, tp->rx_mode);
8611 tw32(MAC_LED_CTRL, tp->led_ctrl);
8613 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8614 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8615 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8618 tw32_f(MAC_RX_MODE, tp->rx_mode);
8621 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8622 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8623 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8624 /* Set drive transmission level to 1.2V */
8625 /* only if the signal pre-emphasis bit is not set */
8626 val = tr32(MAC_SERDES_CFG);
8629 tw32(MAC_SERDES_CFG, val);
8631 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8632 tw32(MAC_SERDES_CFG, 0x616000);
	/* Prevent chip from dropping frames when flow control is enabled. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8644 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8645 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8646 /* Use hardware link auto-negotiation */
8647 tg3_flag_set(tp, HW_AUTONEG);
8650 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8651 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8654 tmp = tr32(SERDES_RX_CTRL);
8655 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8656 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8657 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8658 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8661 if (!tg3_flag(tp, USE_PHYLIB)) {
8662 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8663 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8664 tp->link_config.speed = tp->link_config.orig_speed;
8665 tp->link_config.duplex = tp->link_config.orig_duplex;
8666 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8669 err = tg3_setup_phy(tp, 0);
8673 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8674 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8677 /* Clear CRC stats. */
8678 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8679 tg3_writephy(tp, MII_TG3_TEST1,
8680 tmp | MII_TG3_TEST1_CRC_EN);
8681 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8686 __tg3_set_rx_mode(tp->dev);
8688 /* Initialize receive rules. */
8689 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8690 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8691 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8692 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8694 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8698 if (tg3_flag(tp, ENABLE_ASF))
8702 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8704 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8706 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8708 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8710 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8712 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8714 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8716 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8718 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8720 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8722 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8724 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8726 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8728 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8736 if (tg3_flag(tp, ENABLE_APE))
8737 /* Write our heartbeat update interval to APE. */
8738 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8739 APE_HOST_HEARTBEAT_INT_DISABLE);
8741 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8746 /* Called at device open time to get the chip ready for
8747 * packet processing. Invoked with tp->lock held.
8749 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8751 tg3_switch_clocks(tp);
8753 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8755 return tg3_reset_hw(tp, reset_phy);
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
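/* Worked example: if (PSTAT)->low is 0xfffffff0 and the register
 * reads 0x20, the 32-bit add wraps to 0x10; since 0x10 < 0x20 the
 * wrap is detected and the high word is incremented, carrying the
 * overflow into the 64-bit total.
 */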
8765 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8767 struct tg3_hw_stats *sp = tp->hw_stats;
8769 if (!netif_carrier_ok(tp->dev))
8772 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8773 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8774 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8775 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8776 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8777 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8778 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8779 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8780 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8781 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8782 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8783 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8784 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8786 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8787 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8788 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8789 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8790 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8791 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8792 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8793 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8794 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8795 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8796 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8797 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8798 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8799 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8801 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8802 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8803 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8804 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8805 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8807 u32 val = tr32(HOSTCC_FLOW_ATTN);
8808 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8810 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8811 sp->rx_discards.low += val;
8812 if (sp->rx_discards.low < val)
8813 sp->rx_discards.high += 1;
8815 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8817 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8820 static void tg3_timer(unsigned long __opaque)
8822 struct tg3 *tp = (struct tg3 *) __opaque;
8827 spin_lock(&tp->lock);
8829 if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
8834 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8835 tw32(GRC_LOCAL_CTRL,
8836 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8838 tw32(HOSTCC_MODE, tp->coalesce_mode |
8839 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8842 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8843 tg3_flag_set(tp, RESTART_TIMER);
8844 spin_unlock(&tp->lock);
8845 schedule_work(&tp->reset_task);
8850 /* This part only runs once per second. */
8851 if (!--tp->timer_counter) {
8852 if (tg3_flag(tp, 5705_PLUS))
8853 tg3_periodic_fetch_stats(tp);
8855 if (tp->setlpicnt && !--tp->setlpicnt)
8856 tg3_phy_eee_enable(tp);
8858 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8862 mac_stat = tr32(MAC_STATUS);
8865 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8866 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8868 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8872 tg3_setup_phy(tp, 0);
8873 } else if (tg3_flag(tp, POLL_SERDES)) {
8874 u32 mac_stat = tr32(MAC_STATUS);
8877 if (netif_carrier_ok(tp->dev) &&
8878 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8881 if (!netif_carrier_ok(tp->dev) &&
8882 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8883 MAC_STATUS_SIGNAL_DET))) {
8887 if (!tp->serdes_counter) {
8890 ~MAC_MODE_PORT_MODE_MASK));
8892 tw32_f(MAC_MODE, tp->mac_mode);
8895 tg3_setup_phy(tp, 0);
8897 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8898 tg3_flag(tp, 5780_CLASS)) {
8899 tg3_serdes_parallel_detect(tp);
8902 tp->timer_counter = tp->timer_multiplier;
	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
8922 if (!--tp->asf_counter) {
8923 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8924 tg3_wait_for_event_ack(tp);
8926 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8927 FWCMD_NICDRV_ALIVE3);
8928 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8929 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8930 TG3_FW_UPDATE_TIMEOUT_SEC);
8932 tg3_generate_fw_event(tp);
8934 tp->asf_counter = tp->asf_multiplier;
8937 spin_unlock(&tp->lock);
8940 tp->timer.expires = jiffies + tp->timer_offset;
8941 add_timer(&tp->timer);
8944 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8947 unsigned long flags;
8949 struct tg3_napi *tnapi = &tp->napi[irq_num];
8951 if (tp->irq_cnt == 1)
8952 name = tp->dev->name;
8954 name = &tnapi->irq_lbl[0];
8955 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8956 name[IFNAMSIZ-1] = 0;
8959 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8961 if (tg3_flag(tp, 1SHOT_MSI))
8966 if (tg3_flag(tp, TAGGED_STATUS))
8967 fn = tg3_interrupt_tagged;
8968 flags = IRQF_SHARED;
8971 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8974 static int tg3_test_interrupt(struct tg3 *tp)
8976 struct tg3_napi *tnapi = &tp->napi[0];
8977 struct net_device *dev = tp->dev;
8978 int err, i, intr_ok = 0;
8981 if (!netif_running(dev))
8984 tg3_disable_ints(tp);
8986 free_irq(tnapi->irq_vec, tnapi);
	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
8992 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8993 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8994 tw32(MSGINT_MODE, val);
8997 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8998 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9002 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9003 tg3_enable_ints(tp);
9005 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9008 for (i = 0; i < 5; i++) {
9009 u32 int_mbox, misc_host_ctrl;
9011 int_mbox = tr32_mailbox(tnapi->int_mbox);
9012 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9014 if ((int_mbox != 0) ||
9015 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9023 tg3_disable_ints(tp);
9025 free_irq(tnapi->irq_vec, tnapi);
9027 err = tg3_request_irq(tp, 0);
9033 /* Reenable MSI one shot mode. */
9034 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9035 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9036 tw32(MSGINT_MODE, val);
9044 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9045 * successfully restored. */
9047 static int tg3_test_msi(struct tg3 *tp)
9052 if (!tg3_flag(tp, USING_MSI))
9055 /* Turn off SERR reporting in case MSI terminates with Master Abort. */
9058 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9059 pci_write_config_word(tp->pdev, PCI_COMMAND,
9060 pci_cmd & ~PCI_COMMAND_SERR);
9062 err = tg3_test_interrupt(tp);
9064 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9069 /* other failures */
9073 /* MSI test failed, go back to INTx mode */
9074 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9075 "to INTx mode. Please report this failure to the PCI "
9076 "maintainer and include system chipset information\n");
9078 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9080 pci_disable_msi(tp->pdev);
9082 tg3_flag_clear(tp, USING_MSI);
9083 tp->napi[0].irq_vec = tp->pdev->irq;
9085 err = tg3_request_irq(tp, 0);
9089 /* Need to reset the chip because the MSI cycle may have terminated
9090 * with Master Abort.
9092 tg3_full_lock(tp, 1);
9094 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9095 err = tg3_init_hw(tp, 1);
9097 tg3_full_unlock(tp);
9100 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9105 static int tg3_request_firmware(struct tg3 *tp)
9107 const __be32 *fw_data;
9109 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9110 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9115 fw_data = (void *)tp->fw->data;
9117 /* Firmware blob starts with version numbers, followed by
9118 * start address and _full_ length including BSS sections
9119 * (which must be longer than the actual data, of course). */
9122 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9123 if (tp->fw_len < (tp->fw->size - 12)) {
9124 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9125 tp->fw_len, tp->fw_needed);
9126 release_firmware(tp->fw);
9131 /* We no longer need firmware; we have it. */
9132 tp->fw_needed = NULL;
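/* A minimal sketch, not a struct the driver itself defines: the header
 * layout implied by the comment and the fw_data[] indexing above. The
 * 12-byte figure in the length check corresponds to this header.
 */
struct tg3_fw_hdr_sketch {
	__be32 version;		/* fw_data[0] */
	__be32 base_addr;	/* fw_data[1]: load address in NIC memory */
	__be32 len;		/* fw_data[2]: full length, including BSS */
};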
9136 static bool tg3_enable_msix(struct tg3 *tp)
9138 int i, rc, cpus = num_online_cpus();
9139 struct msix_entry msix_ent[tp->irq_max];
9142 /* Just fall back to the simpler MSI mode. */
9146 * We want as many rx rings enabled as there are cpus.
9147 * The first MSIX vector only deals with link interrupts, etc,
9148 * so we add one to the number of vectors we are requesting.
9150 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
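	/* e.g. on a 4-CPU system with irq_max >= 5 this requests five
	 * vectors: one for link/error events plus one rx vector per CPU.
	 */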
9152 for (i = 0; i < tp->irq_max; i++) {
9153 msix_ent[i].entry = i;
9154 msix_ent[i].vector = 0;
9157 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9160 } else if (rc != 0) {
9161 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9163 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9168 for (i = 0; i < tp->irq_max; i++)
9169 tp->napi[i].irq_vec = msix_ent[i].vector;
9171 netif_set_real_num_tx_queues(tp->dev, 1);
9172 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9173 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9174 pci_disable_msix(tp->pdev);
9178 if (tp->irq_cnt > 1) {
9179 tg3_flag_set(tp, ENABLE_RSS);
9181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9183 tg3_flag_set(tp, ENABLE_TSS);
9184 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9191 static void tg3_ints_init(struct tg3 *tp)
9193 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9194 !tg3_flag(tp, TAGGED_STATUS)) {
9195 /* All MSI supporting chips should support tagged
9196 * status. Assert that this is the case.
9198 netdev_warn(tp->dev,
9199 "MSI without TAGGED_STATUS? Not using MSI\n");
9203 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9204 tg3_flag_set(tp, USING_MSIX);
9205 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9206 tg3_flag_set(tp, USING_MSI);
9208 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9209 u32 msi_mode = tr32(MSGINT_MODE);
9210 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9211 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9212 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9215 if (!tg3_flag(tp, USING_MSIX)) {
9217 tp->napi[0].irq_vec = tp->pdev->irq;
9218 netif_set_real_num_tx_queues(tp->dev, 1);
9219 netif_set_real_num_rx_queues(tp->dev, 1);
9223 static void tg3_ints_fini(struct tg3 *tp)
9225 if (tg3_flag(tp, USING_MSIX))
9226 pci_disable_msix(tp->pdev);
9227 else if (tg3_flag(tp, USING_MSI))
9228 pci_disable_msi(tp->pdev);
9229 tg3_flag_clear(tp, USING_MSI);
9230 tg3_flag_clear(tp, USING_MSIX);
9231 tg3_flag_clear(tp, ENABLE_RSS);
9232 tg3_flag_clear(tp, ENABLE_TSS);
9235 static int tg3_open(struct net_device *dev)
9237 struct tg3 *tp = netdev_priv(dev);
9240 if (tp->fw_needed) {
9241 err = tg3_request_firmware(tp);
9242 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9246 netdev_warn(tp->dev, "TSO capability disabled\n");
9247 tg3_flag_clear(tp, TSO_CAPABLE);
9248 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9249 netdev_notice(tp->dev, "TSO capability restored\n");
9250 tg3_flag_set(tp, TSO_CAPABLE);
9254 netif_carrier_off(tp->dev);
9256 err = tg3_power_up(tp);
9260 tg3_full_lock(tp, 0);
9262 tg3_disable_ints(tp);
9263 tg3_flag_clear(tp, INIT_COMPLETE);
9265 tg3_full_unlock(tp);
9268 * Setup interrupts first so we know how
9269 * many NAPI resources to allocate
9273 /* The placement of this call is tied
9274 * to the setup and use of Host TX descriptors.
9276 err = tg3_alloc_consistent(tp);
9282 tg3_napi_enable(tp);
9284 for (i = 0; i < tp->irq_cnt; i++) {
9285 struct tg3_napi *tnapi = &tp->napi[i];
9286 err = tg3_request_irq(tp, i);
9288 for (i--; i >= 0; i--)
9289 free_irq(tnapi->irq_vec, tnapi);
9297 tg3_full_lock(tp, 0);
9299 err = tg3_init_hw(tp, 1);
9301 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9304 if (tg3_flag(tp, TAGGED_STATUS))
9305 tp->timer_offset = HZ;
9307 tp->timer_offset = HZ / 10;
9309 BUG_ON(tp->timer_offset > HZ);
9310 tp->timer_counter = tp->timer_multiplier =
9311 (HZ / tp->timer_offset);
9312 tp->asf_counter = tp->asf_multiplier =
9313 ((HZ / tp->timer_offset) * 2);
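	/* Worked example, assuming HZ=1000 and no TAGGED_STATUS: the timer
	 * fires every HZ/10 = 100 jiffies, timer_counter counts 10 ticks
	 * (one second) per slow-path pass, and asf_counter counts 20 ticks,
	 * matching the 2-second ASF heartbeat described in tg3_timer().
	 */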
9315 init_timer(&tp->timer);
9316 tp->timer.expires = jiffies + tp->timer_offset;
9317 tp->timer.data = (unsigned long) tp;
9318 tp->timer.function = tg3_timer;
9321 tg3_full_unlock(tp);
9326 if (tg3_flag(tp, USING_MSI)) {
9327 err = tg3_test_msi(tp);
9330 tg3_full_lock(tp, 0);
9331 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9333 tg3_full_unlock(tp);
9338 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9339 u32 val = tr32(PCIE_TRANSACTION_CFG);
9341 tw32(PCIE_TRANSACTION_CFG,
9342 val | PCIE_TRANS_CFG_1SHOT_MSI);
9348 tg3_full_lock(tp, 0);
9350 add_timer(&tp->timer);
9351 tg3_flag_set(tp, INIT_COMPLETE);
9352 tg3_enable_ints(tp);
9354 tg3_full_unlock(tp);
9356 netif_tx_start_all_queues(dev);
9359 * Reset the loopback feature if it was turned on while the device
9360 * was down; make sure that it's installed properly now.
9362 if (dev->features & NETIF_F_LOOPBACK)
9363 tg3_set_loopback(dev, dev->features);
9368 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9369 struct tg3_napi *tnapi = &tp->napi[i];
9370 free_irq(tnapi->irq_vec, tnapi);
9374 tg3_napi_disable(tp);
9376 tg3_free_consistent(tp);
9383 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9384 struct rtnl_link_stats64 *);
9385 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9387 static int tg3_close(struct net_device *dev)
9390 struct tg3 *tp = netdev_priv(dev);
9392 tg3_napi_disable(tp);
9393 cancel_work_sync(&tp->reset_task);
9395 netif_tx_stop_all_queues(dev);
9397 del_timer_sync(&tp->timer);
9401 tg3_full_lock(tp, 1);
9403 tg3_disable_ints(tp);
9405 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9407 tg3_flag_clear(tp, INIT_COMPLETE);
9409 tg3_full_unlock(tp);
9411 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9412 struct tg3_napi *tnapi = &tp->napi[i];
9413 free_irq(tnapi->irq_vec, tnapi);
9418 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9420 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9421 sizeof(tp->estats_prev));
9425 tg3_free_consistent(tp);
9429 netif_carrier_off(tp->dev);
9434 static inline u64 get_stat64(tg3_stat64_t *val)
9436 return ((u64)val->high << 32) | ((u64)val->low);
9439 static u64 calc_crc_errors(struct tg3 *tp)
9441 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9443 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9444 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9448 spin_lock_bh(&tp->lock);
9449 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9450 tg3_writephy(tp, MII_TG3_TEST1,
9451 val | MII_TG3_TEST1_CRC_EN);
9452 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9455 spin_unlock_bh(&tp->lock);
9457 tp->phy_crc_errors += val;
9459 return tp->phy_crc_errors;
9462 return get_stat64(&hw_stats->rx_fcs_errors);
9465 #define ESTAT_ADD(member) \
9466 estats->member = old_estats->member + \
9467 get_stat64(&hw_stats->member)
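/* For example, ESTAT_ADD(rx_octets) expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 */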
9469 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9471 struct tg3_ethtool_stats *estats = &tp->estats;
9472 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9473 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9478 ESTAT_ADD(rx_octets);
9479 ESTAT_ADD(rx_fragments);
9480 ESTAT_ADD(rx_ucast_packets);
9481 ESTAT_ADD(rx_mcast_packets);
9482 ESTAT_ADD(rx_bcast_packets);
9483 ESTAT_ADD(rx_fcs_errors);
9484 ESTAT_ADD(rx_align_errors);
9485 ESTAT_ADD(rx_xon_pause_rcvd);
9486 ESTAT_ADD(rx_xoff_pause_rcvd);
9487 ESTAT_ADD(rx_mac_ctrl_rcvd);
9488 ESTAT_ADD(rx_xoff_entered);
9489 ESTAT_ADD(rx_frame_too_long_errors);
9490 ESTAT_ADD(rx_jabbers);
9491 ESTAT_ADD(rx_undersize_packets);
9492 ESTAT_ADD(rx_in_length_errors);
9493 ESTAT_ADD(rx_out_length_errors);
9494 ESTAT_ADD(rx_64_or_less_octet_packets);
9495 ESTAT_ADD(rx_65_to_127_octet_packets);
9496 ESTAT_ADD(rx_128_to_255_octet_packets);
9497 ESTAT_ADD(rx_256_to_511_octet_packets);
9498 ESTAT_ADD(rx_512_to_1023_octet_packets);
9499 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9500 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9501 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9502 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9503 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9505 ESTAT_ADD(tx_octets);
9506 ESTAT_ADD(tx_collisions);
9507 ESTAT_ADD(tx_xon_sent);
9508 ESTAT_ADD(tx_xoff_sent);
9509 ESTAT_ADD(tx_flow_control);
9510 ESTAT_ADD(tx_mac_errors);
9511 ESTAT_ADD(tx_single_collisions);
9512 ESTAT_ADD(tx_mult_collisions);
9513 ESTAT_ADD(tx_deferred);
9514 ESTAT_ADD(tx_excessive_collisions);
9515 ESTAT_ADD(tx_late_collisions);
9516 ESTAT_ADD(tx_collide_2times);
9517 ESTAT_ADD(tx_collide_3times);
9518 ESTAT_ADD(tx_collide_4times);
9519 ESTAT_ADD(tx_collide_5times);
9520 ESTAT_ADD(tx_collide_6times);
9521 ESTAT_ADD(tx_collide_7times);
9522 ESTAT_ADD(tx_collide_8times);
9523 ESTAT_ADD(tx_collide_9times);
9524 ESTAT_ADD(tx_collide_10times);
9525 ESTAT_ADD(tx_collide_11times);
9526 ESTAT_ADD(tx_collide_12times);
9527 ESTAT_ADD(tx_collide_13times);
9528 ESTAT_ADD(tx_collide_14times);
9529 ESTAT_ADD(tx_collide_15times);
9530 ESTAT_ADD(tx_ucast_packets);
9531 ESTAT_ADD(tx_mcast_packets);
9532 ESTAT_ADD(tx_bcast_packets);
9533 ESTAT_ADD(tx_carrier_sense_errors);
9534 ESTAT_ADD(tx_discards);
9535 ESTAT_ADD(tx_errors);
9537 ESTAT_ADD(dma_writeq_full);
9538 ESTAT_ADD(dma_write_prioq_full);
9539 ESTAT_ADD(rxbds_empty);
9540 ESTAT_ADD(rx_discards);
9541 ESTAT_ADD(rx_errors);
9542 ESTAT_ADD(rx_threshold_hit);
9544 ESTAT_ADD(dma_readq_full);
9545 ESTAT_ADD(dma_read_prioq_full);
9546 ESTAT_ADD(tx_comp_queue_full);
9548 ESTAT_ADD(ring_set_send_prod_index);
9549 ESTAT_ADD(ring_status_update);
9550 ESTAT_ADD(nic_irqs);
9551 ESTAT_ADD(nic_avoided_irqs);
9552 ESTAT_ADD(nic_tx_threshold_hit);
9554 ESTAT_ADD(mbuf_lwm_thresh_hit);
9559 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9560 struct rtnl_link_stats64 *stats)
9562 struct tg3 *tp = netdev_priv(dev);
9563 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9564 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9569 stats->rx_packets = old_stats->rx_packets +
9570 get_stat64(&hw_stats->rx_ucast_packets) +
9571 get_stat64(&hw_stats->rx_mcast_packets) +
9572 get_stat64(&hw_stats->rx_bcast_packets);
9574 stats->tx_packets = old_stats->tx_packets +
9575 get_stat64(&hw_stats->tx_ucast_packets) +
9576 get_stat64(&hw_stats->tx_mcast_packets) +
9577 get_stat64(&hw_stats->tx_bcast_packets);
9579 stats->rx_bytes = old_stats->rx_bytes +
9580 get_stat64(&hw_stats->rx_octets);
9581 stats->tx_bytes = old_stats->tx_bytes +
9582 get_stat64(&hw_stats->tx_octets);
9584 stats->rx_errors = old_stats->rx_errors +
9585 get_stat64(&hw_stats->rx_errors);
9586 stats->tx_errors = old_stats->tx_errors +
9587 get_stat64(&hw_stats->tx_errors) +
9588 get_stat64(&hw_stats->tx_mac_errors) +
9589 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9590 get_stat64(&hw_stats->tx_discards);
9592 stats->multicast = old_stats->multicast +
9593 get_stat64(&hw_stats->rx_mcast_packets);
9594 stats->collisions = old_stats->collisions +
9595 get_stat64(&hw_stats->tx_collisions);
9597 stats->rx_length_errors = old_stats->rx_length_errors +
9598 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9599 get_stat64(&hw_stats->rx_undersize_packets);
9601 stats->rx_over_errors = old_stats->rx_over_errors +
9602 get_stat64(&hw_stats->rxbds_empty);
9603 stats->rx_frame_errors = old_stats->rx_frame_errors +
9604 get_stat64(&hw_stats->rx_align_errors);
9605 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9606 get_stat64(&hw_stats->tx_discards);
9607 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9608 get_stat64(&hw_stats->tx_carrier_sense_errors);
9610 stats->rx_crc_errors = old_stats->rx_crc_errors +
9611 calc_crc_errors(tp);
9613 stats->rx_missed_errors = old_stats->rx_missed_errors +
9614 get_stat64(&hw_stats->rx_discards);
9616 stats->rx_dropped = tp->rx_dropped;
9621 static inline u32 calc_crc(unsigned char *buf, int len)
9629 for (j = 0; j < len; j++) {
9632 for (k = 0; k < 8; k++) {
9645 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9647 /* accept or reject all multicast frames */
9648 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9649 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9650 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9651 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9654 static void __tg3_set_rx_mode(struct net_device *dev)
9656 struct tg3 *tp = netdev_priv(dev);
9659 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9660 RX_MODE_KEEP_VLAN_TAG);
9662 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9663 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG flag clear. */
9666 if (!tg3_flag(tp, ENABLE_ASF))
9667 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9670 if (dev->flags & IFF_PROMISC) {
9671 /* Promiscuous mode. */
9672 rx_mode |= RX_MODE_PROMISC;
9673 } else if (dev->flags & IFF_ALLMULTI) {
9674 /* Accept all multicast. */
9675 tg3_set_multi(tp, 1);
9676 } else if (netdev_mc_empty(dev)) {
9677 /* Reject all multicast. */
9678 tg3_set_multi(tp, 0);
9680 /* Accept one or more multicast(s). */
9681 struct netdev_hw_addr *ha;
9682 u32 mc_filter[4] = { 0, };
9687 netdev_for_each_mc_addr(ha, dev) {
9688 crc = calc_crc(ha->addr, ETH_ALEN);
9690 regidx = (bit & 0x60) >> 5;
9692 mc_filter[regidx] |= (1 << bit);
9695 tw32(MAC_HASH_REG_0, mc_filter[0]);
9696 tw32(MAC_HASH_REG_1, mc_filter[1]);
9697 tw32(MAC_HASH_REG_2, mc_filter[2]);
9698 tw32(MAC_HASH_REG_3, mc_filter[3]);
9701 if (rx_mode != tp->rx_mode) {
9702 tp->rx_mode = rx_mode;
9703 tw32_f(MAC_RX_MODE, rx_mode);
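/* A stand-alone sketch of how a multicast address maps onto the four
 * 32-bit MAC_HASH_REG_n filters used above. Assumption: the elided lines
 * derive 'bit' from the inverted CRC ('bit = ~crc & 0x7f'); the low 7
 * bits then select one of 128 filter bits, bits 6:5 the register and
 * bits 4:0 the bit within it.
 */
static void tg3_mc_hash_pos_sketch(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = ~crc & 0x7f;		/* 128 filter bits in total */

	*regidx = (bit & 0x60) >> 5;	/* which MAC_HASH_REG_n */
	*bitpos = bit & 0x1f;		/* which bit within that register */
}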
9708 static void tg3_set_rx_mode(struct net_device *dev)
9710 struct tg3 *tp = netdev_priv(dev);
9712 if (!netif_running(dev))
9715 tg3_full_lock(tp, 0);
9716 __tg3_set_rx_mode(dev);
9717 tg3_full_unlock(tp);
9720 static int tg3_get_regs_len(struct net_device *dev)
9722 return TG3_REG_BLK_SIZE;
9725 static void tg3_get_regs(struct net_device *dev,
9726 struct ethtool_regs *regs, void *_p)
9728 struct tg3 *tp = netdev_priv(dev);
9732 memset(_p, 0, TG3_REG_BLK_SIZE);
9734 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9737 tg3_full_lock(tp, 0);
9739 tg3_dump_legacy_regs(tp, (u32 *)_p);
9741 tg3_full_unlock(tp);
9744 static int tg3_get_eeprom_len(struct net_device *dev)
9746 struct tg3 *tp = netdev_priv(dev);
9748 return tp->nvram_size;
9751 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9753 struct tg3 *tp = netdev_priv(dev);
9756 u32 i, offset, len, b_offset, b_count;
9759 if (tg3_flag(tp, NO_NVRAM))
9762 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9765 offset = eeprom->offset;
9769 eeprom->magic = TG3_EEPROM_MAGIC;
9772 /* adjustments to start on required 4 byte boundary */
9773 b_offset = offset & 3;
9774 b_count = 4 - b_offset;
9775 if (b_count > len) {
9776 /* i.e. offset=1 len=2 */
9779 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9782 memcpy(data, ((char *)&val) + b_offset, b_count);
9785 eeprom->len += b_count;
9788 /* read bytes up to the last 4 byte boundary */
9789 pd = &data[eeprom->len];
9790 for (i = 0; i < (len - (len & 3)); i += 4) {
9791 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9796 memcpy(pd + i, &val, 4);
9801 /* read last bytes not ending on 4 byte boundary */
9802 pd = &data[eeprom->len];
9804 b_offset = offset + len - b_count;
9805 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9808 memcpy(pd, &val, b_count);
9809 eeprom->len += b_count;
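/* Worked example for the split above (some glue lines are elided here):
 * offset=5, len=10 reads the word at 4 and copies bytes 5..7 (head),
 * reads one aligned word covering bytes 8..11, then reads the word at 12
 * and copies bytes 12..14 (tail) -- 3 + 4 + 3 = 10 bytes in total.
 */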
9814 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9816 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9818 struct tg3 *tp = netdev_priv(dev);
9820 u32 offset, len, b_offset, odd_len;
9824 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9827 if (tg3_flag(tp, NO_NVRAM) ||
9828 eeprom->magic != TG3_EEPROM_MAGIC)
9831 offset = eeprom->offset;
9834 if ((b_offset = (offset & 3))) {
9835 /* adjustments to start on required 4 byte boundary */
9836 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9847 /* adjustments to end on required 4 byte boundary */
9849 len = (len + 3) & ~3;
9850 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9856 if (b_offset || odd_len) {
9857 buf = kmalloc(len, GFP_KERNEL);
9861 memcpy(buf, &start, 4);
9863 memcpy(buf+len-4, &end, 4);
9864 memcpy(buf + b_offset, data, eeprom->len);
9867 ret = tg3_nvram_write_block(tp, offset, len, buf);
9875 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9877 struct tg3 *tp = netdev_priv(dev);
9879 if (tg3_flag(tp, USE_PHYLIB)) {
9880 struct phy_device *phydev;
9881 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9883 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9884 return phy_ethtool_gset(phydev, cmd);
9887 cmd->supported = (SUPPORTED_Autoneg);
9889 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9890 cmd->supported |= (SUPPORTED_1000baseT_Half |
9891 SUPPORTED_1000baseT_Full);
9893 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9894 cmd->supported |= (SUPPORTED_100baseT_Half |
9895 SUPPORTED_100baseT_Full |
9896 SUPPORTED_10baseT_Half |
9897 SUPPORTED_10baseT_Full |
9899 cmd->port = PORT_TP;
9901 cmd->supported |= SUPPORTED_FIBRE;
9902 cmd->port = PORT_FIBRE;
9905 cmd->advertising = tp->link_config.advertising;
9906 if (netif_running(dev)) {
9907 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9908 cmd->duplex = tp->link_config.active_duplex;
9910 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9911 cmd->duplex = DUPLEX_INVALID;
9913 cmd->phy_address = tp->phy_addr;
9914 cmd->transceiver = XCVR_INTERNAL;
9915 cmd->autoneg = tp->link_config.autoneg;
9921 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9923 struct tg3 *tp = netdev_priv(dev);
9924 u32 speed = ethtool_cmd_speed(cmd);
9926 if (tg3_flag(tp, USE_PHYLIB)) {
9927 struct phy_device *phydev;
9928 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9930 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9931 return phy_ethtool_sset(phydev, cmd);
9934 if (cmd->autoneg != AUTONEG_ENABLE &&
9935 cmd->autoneg != AUTONEG_DISABLE)
9938 if (cmd->autoneg == AUTONEG_DISABLE &&
9939 cmd->duplex != DUPLEX_FULL &&
9940 cmd->duplex != DUPLEX_HALF)
9943 if (cmd->autoneg == AUTONEG_ENABLE) {
9944 u32 mask = ADVERTISED_Autoneg |
9946 ADVERTISED_Asym_Pause;
9948 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9949 mask |= ADVERTISED_1000baseT_Half |
9950 ADVERTISED_1000baseT_Full;
9952 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9953 mask |= ADVERTISED_100baseT_Half |
9954 ADVERTISED_100baseT_Full |
9955 ADVERTISED_10baseT_Half |
9956 ADVERTISED_10baseT_Full |
9959 mask |= ADVERTISED_FIBRE;
9961 if (cmd->advertising & ~mask)
9964 mask &= (ADVERTISED_1000baseT_Half |
9965 ADVERTISED_1000baseT_Full |
9966 ADVERTISED_100baseT_Half |
9967 ADVERTISED_100baseT_Full |
9968 ADVERTISED_10baseT_Half |
9969 ADVERTISED_10baseT_Full);
9971 cmd->advertising &= mask;
9973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9974 if (speed != SPEED_1000)
9977 if (cmd->duplex != DUPLEX_FULL)
9980 if (speed != SPEED_100 &&
9986 tg3_full_lock(tp, 0);
9988 tp->link_config.autoneg = cmd->autoneg;
9989 if (cmd->autoneg == AUTONEG_ENABLE) {
9990 tp->link_config.advertising = (cmd->advertising |
9991 ADVERTISED_Autoneg);
9992 tp->link_config.speed = SPEED_INVALID;
9993 tp->link_config.duplex = DUPLEX_INVALID;
9995 tp->link_config.advertising = 0;
9996 tp->link_config.speed = speed;
9997 tp->link_config.duplex = cmd->duplex;
10000 tp->link_config.orig_speed = tp->link_config.speed;
10001 tp->link_config.orig_duplex = tp->link_config.duplex;
10002 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10004 if (netif_running(dev))
10005 tg3_setup_phy(tp, 1);
10007 tg3_full_unlock(tp);
10012 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10014 struct tg3 *tp = netdev_priv(dev);
10016 strcpy(info->driver, DRV_MODULE_NAME);
10017 strcpy(info->version, DRV_MODULE_VERSION);
10018 strcpy(info->fw_version, tp->fw_ver);
10019 strcpy(info->bus_info, pci_name(tp->pdev));
10022 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10024 struct tg3 *tp = netdev_priv(dev);
10026 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10027 wol->supported = WAKE_MAGIC;
10029 wol->supported = 0;
10031 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10032 wol->wolopts = WAKE_MAGIC;
10033 memset(&wol->sopass, 0, sizeof(wol->sopass));
10036 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10038 struct tg3 *tp = netdev_priv(dev);
10039 struct device *dp = &tp->pdev->dev;
10041 if (wol->wolopts & ~WAKE_MAGIC)
10043 if ((wol->wolopts & WAKE_MAGIC) &&
10044 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10047 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10049 spin_lock_bh(&tp->lock);
10050 if (device_may_wakeup(dp))
10051 tg3_flag_set(tp, WOL_ENABLE);
10053 tg3_flag_clear(tp, WOL_ENABLE);
10054 spin_unlock_bh(&tp->lock);
10059 static u32 tg3_get_msglevel(struct net_device *dev)
10061 struct tg3 *tp = netdev_priv(dev);
10062 return tp->msg_enable;
10065 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10067 struct tg3 *tp = netdev_priv(dev);
10068 tp->msg_enable = value;
10071 static int tg3_nway_reset(struct net_device *dev)
10073 struct tg3 *tp = netdev_priv(dev);
10076 if (!netif_running(dev))
10079 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10082 if (tg3_flag(tp, USE_PHYLIB)) {
10083 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10085 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10089 spin_lock_bh(&tp->lock);
10091 tg3_readphy(tp, MII_BMCR, &bmcr);
10092 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10093 ((bmcr & BMCR_ANENABLE) ||
10094 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10095 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10099 spin_unlock_bh(&tp->lock);
10105 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10107 struct tg3 *tp = netdev_priv(dev);
10109 ering->rx_max_pending = tp->rx_std_ring_mask;
10110 ering->rx_mini_max_pending = 0;
10111 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10112 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10114 ering->rx_jumbo_max_pending = 0;
10116 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10118 ering->rx_pending = tp->rx_pending;
10119 ering->rx_mini_pending = 0;
10120 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10121 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10123 ering->rx_jumbo_pending = 0;
10125 ering->tx_pending = tp->napi[0].tx_pending;
10128 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10130 struct tg3 *tp = netdev_priv(dev);
10131 int i, irq_sync = 0, err = 0;
10133 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10134 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10135 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10136 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10137 (tg3_flag(tp, TSO_BUG) &&
10138 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10141 if (netif_running(dev)) {
10143 tg3_netif_stop(tp);
10147 tg3_full_lock(tp, irq_sync);
10149 tp->rx_pending = ering->rx_pending;
10151 if (tg3_flag(tp, MAX_RXPEND_64) &&
10152 tp->rx_pending > 63)
10153 tp->rx_pending = 63;
10154 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10156 for (i = 0; i < tp->irq_max; i++)
10157 tp->napi[i].tx_pending = ering->tx_pending;
10159 if (netif_running(dev)) {
10160 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10161 err = tg3_restart_hw(tp, 1);
10163 tg3_netif_start(tp);
10166 tg3_full_unlock(tp);
10168 if (irq_sync && !err)
10174 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10176 struct tg3 *tp = netdev_priv(dev);
10178 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10180 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10181 epause->rx_pause = 1;
10183 epause->rx_pause = 0;
10185 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10186 epause->tx_pause = 1;
10188 epause->tx_pause = 0;
10191 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10193 struct tg3 *tp = netdev_priv(dev);
10196 if (tg3_flag(tp, USE_PHYLIB)) {
10198 struct phy_device *phydev;
10200 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10202 if (!(phydev->supported & SUPPORTED_Pause) ||
10203 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10204 (epause->rx_pause != epause->tx_pause)))
10207 tp->link_config.flowctrl = 0;
10208 if (epause->rx_pause) {
10209 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10211 if (epause->tx_pause) {
10212 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10213 newadv = ADVERTISED_Pause;
10215 newadv = ADVERTISED_Pause |
10216 ADVERTISED_Asym_Pause;
10217 } else if (epause->tx_pause) {
10218 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10219 newadv = ADVERTISED_Asym_Pause;
10223 if (epause->autoneg)
10224 tg3_flag_set(tp, PAUSE_AUTONEG);
10226 tg3_flag_clear(tp, PAUSE_AUTONEG);
10228 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10229 u32 oldadv = phydev->advertising &
10230 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10231 if (oldadv != newadv) {
10232 phydev->advertising &=
10233 ~(ADVERTISED_Pause |
10234 ADVERTISED_Asym_Pause);
10235 phydev->advertising |= newadv;
10236 if (phydev->autoneg) {
10238 * Always renegotiate the link to
10239 * inform our link partner of our
10240 * flow control settings, even if the
10241 * flow control is forced. Let
10242 * tg3_adjust_link() do the final
10243 * flow control setup.
10245 return phy_start_aneg(phydev);
10249 if (!epause->autoneg)
10250 tg3_setup_flow_control(tp, 0, 0);
10252 tp->link_config.orig_advertising &=
10253 ~(ADVERTISED_Pause |
10254 ADVERTISED_Asym_Pause);
10255 tp->link_config.orig_advertising |= newadv;
10260 if (netif_running(dev)) {
10261 tg3_netif_stop(tp);
10265 tg3_full_lock(tp, irq_sync);
10267 if (epause->autoneg)
10268 tg3_flag_set(tp, PAUSE_AUTONEG);
10270 tg3_flag_clear(tp, PAUSE_AUTONEG);
10271 if (epause->rx_pause)
10272 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10274 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10275 if (epause->tx_pause)
10276 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10278 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10280 if (netif_running(dev)) {
10281 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10282 err = tg3_restart_hw(tp, 1);
10284 tg3_netif_start(tp);
10287 tg3_full_unlock(tp);
10293 static int tg3_get_sset_count(struct net_device *dev, int sset)
10297 return TG3_NUM_TEST;
10299 return TG3_NUM_STATS;
10301 return -EOPNOTSUPP;
10305 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10307 switch (stringset) {
10309 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10312 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10315 WARN_ON(1); /* we need a WARN() */
10320 static int tg3_set_phys_id(struct net_device *dev,
10321 enum ethtool_phys_id_state state)
10323 struct tg3 *tp = netdev_priv(dev);
10325 if (!netif_running(tp->dev))
10329 case ETHTOOL_ID_ACTIVE:
10330 return 1; /* cycle on/off once per second */
10332 case ETHTOOL_ID_ON:
10333 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10334 LED_CTRL_1000MBPS_ON |
10335 LED_CTRL_100MBPS_ON |
10336 LED_CTRL_10MBPS_ON |
10337 LED_CTRL_TRAFFIC_OVERRIDE |
10338 LED_CTRL_TRAFFIC_BLINK |
10339 LED_CTRL_TRAFFIC_LED);
10342 case ETHTOOL_ID_OFF:
10343 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10344 LED_CTRL_TRAFFIC_OVERRIDE);
10347 case ETHTOOL_ID_INACTIVE:
10348 tw32(MAC_LED_CTRL, tp->led_ctrl);
10355 static void tg3_get_ethtool_stats(struct net_device *dev,
10356 struct ethtool_stats *estats, u64 *tmp_stats)
10358 struct tg3 *tp = netdev_priv(dev);
10359 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10362 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10366 u32 offset = 0, len = 0;
10369 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10372 if (magic == TG3_EEPROM_MAGIC) {
10373 for (offset = TG3_NVM_DIR_START;
10374 offset < TG3_NVM_DIR_END;
10375 offset += TG3_NVM_DIRENT_SIZE) {
10376 if (tg3_nvram_read(tp, offset, &val))
10379 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10380 TG3_NVM_DIRTYPE_EXTVPD)
10384 if (offset != TG3_NVM_DIR_END) {
10385 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10386 if (tg3_nvram_read(tp, offset + 4, &offset))
10389 offset = tg3_nvram_logical_addr(tp, offset);
10393 if (!offset || !len) {
10394 offset = TG3_NVM_VPD_OFF;
10395 len = TG3_NVM_VPD_LEN;
10398 buf = kmalloc(len, GFP_KERNEL);
10402 if (magic == TG3_EEPROM_MAGIC) {
10403 for (i = 0; i < len; i += 4) {
10404 /* The data is in little-endian format in NVRAM.
10405 * Use the big-endian read routines to preserve
10406 * the byte order as it exists in NVRAM.
10408 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10414 unsigned int pos = 0;
10416 ptr = (u8 *)&buf[0];
10417 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10418 cnt = pci_read_vpd(tp->pdev, pos,
10420 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10436 #define NVRAM_TEST_SIZE 0x100
10437 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10438 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10439 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10440 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10441 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10443 static int tg3_test_nvram(struct tg3 *tp)
10447 int i, j, k, err = 0, size;
10449 if (tg3_flag(tp, NO_NVRAM))
10452 if (tg3_nvram_read(tp, 0, &magic) != 0)
10455 if (magic == TG3_EEPROM_MAGIC)
10456 size = NVRAM_TEST_SIZE;
10457 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10458 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10459 TG3_EEPROM_SB_FORMAT_1) {
10460 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10461 case TG3_EEPROM_SB_REVISION_0:
10462 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10464 case TG3_EEPROM_SB_REVISION_2:
10465 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10467 case TG3_EEPROM_SB_REVISION_3:
10468 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10475 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10476 size = NVRAM_SELFBOOT_HW_SIZE;
10480 buf = kmalloc(size, GFP_KERNEL);
10485 for (i = 0, j = 0; i < size; i += 4, j++) {
10486 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10493 /* Selfboot format */
10494 magic = be32_to_cpu(buf[0]);
10495 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10496 TG3_EEPROM_MAGIC_FW) {
10497 u8 *buf8 = (u8 *) buf, csum8 = 0;
10499 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10500 TG3_EEPROM_SB_REVISION_2) {
10501 /* For rev 2, the csum doesn't include the MBA. */
10502 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10504 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10507 for (i = 0; i < size; i++)
10520 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10521 TG3_EEPROM_MAGIC_HW) {
10522 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10523 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10524 u8 *buf8 = (u8 *) buf;
10526 /* Separate the parity bits and the data bytes. */
10527 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10528 if ((i == 0) || (i == 8)) {
10532 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10533 parity[k++] = buf8[i] & msk;
10535 } else if (i == 16) {
10539 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10540 parity[k++] = buf8[i] & msk;
10543 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10544 parity[k++] = buf8[i] & msk;
10547 data[j++] = buf8[i];
10551 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10552 u8 hw8 = hweight8(data[i]);
10554 if ((hw8 & 0x1) && parity[i])
10556 else if (!(hw8 & 0x1) && !parity[i])
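			/* i.e. every data byte together with its stored
			 * parity bit must have odd combined weight (odd
			 * parity): odd data weight pairs with a clear
			 * parity bit, even weight with a set one.
			 */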
10565 /* Bootstrap checksum at offset 0x10 */
10566 csum = calc_crc((unsigned char *) buf, 0x10);
10567 if (csum != le32_to_cpu(buf[0x10/4]))
10570 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10571 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10572 if (csum != le32_to_cpu(buf[0xfc/4]))
10577 buf = tg3_vpd_readblock(tp);
10581 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10582 PCI_VPD_LRDT_RO_DATA);
10584 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10588 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10591 i += PCI_VPD_LRDT_TAG_SIZE;
10592 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10593 PCI_VPD_RO_KEYWORD_CHKSUM);
10597 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10599 for (i = 0; i <= j; i++)
10600 csum8 += ((u8 *)buf)[i];
10614 #define TG3_SERDES_TIMEOUT_SEC 2
10615 #define TG3_COPPER_TIMEOUT_SEC 6
10617 static int tg3_test_link(struct tg3 *tp)
10621 if (!netif_running(tp->dev))
10624 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10625 max = TG3_SERDES_TIMEOUT_SEC;
10627 max = TG3_COPPER_TIMEOUT_SEC;
10629 for (i = 0; i < max; i++) {
10630 if (netif_carrier_ok(tp->dev))
10633 if (msleep_interruptible(1000))
10640 /* Only test the commonly used registers */
10641 static int tg3_test_registers(struct tg3 *tp)
10643 int i, is_5705, is_5750;
10644 u32 offset, read_mask, write_mask, val, save_val, read_val;
10648 #define TG3_FL_5705 0x1
10649 #define TG3_FL_NOT_5705 0x2
10650 #define TG3_FL_NOT_5788 0x4
10651 #define TG3_FL_NOT_5750 0x8
10655 /* MAC Control Registers */
10656 { MAC_MODE, TG3_FL_NOT_5705,
10657 0x00000000, 0x00ef6f8c },
10658 { MAC_MODE, TG3_FL_5705,
10659 0x00000000, 0x01ef6b8c },
10660 { MAC_STATUS, TG3_FL_NOT_5705,
10661 0x03800107, 0x00000000 },
10662 { MAC_STATUS, TG3_FL_5705,
10663 0x03800100, 0x00000000 },
10664 { MAC_ADDR_0_HIGH, 0x0000,
10665 0x00000000, 0x0000ffff },
10666 { MAC_ADDR_0_LOW, 0x0000,
10667 0x00000000, 0xffffffff },
10668 { MAC_RX_MTU_SIZE, 0x0000,
10669 0x00000000, 0x0000ffff },
10670 { MAC_TX_MODE, 0x0000,
10671 0x00000000, 0x00000070 },
10672 { MAC_TX_LENGTHS, 0x0000,
10673 0x00000000, 0x00003fff },
10674 { MAC_RX_MODE, TG3_FL_NOT_5705,
10675 0x00000000, 0x000007fc },
10676 { MAC_RX_MODE, TG3_FL_5705,
10677 0x00000000, 0x000007dc },
10678 { MAC_HASH_REG_0, 0x0000,
10679 0x00000000, 0xffffffff },
10680 { MAC_HASH_REG_1, 0x0000,
10681 0x00000000, 0xffffffff },
10682 { MAC_HASH_REG_2, 0x0000,
10683 0x00000000, 0xffffffff },
10684 { MAC_HASH_REG_3, 0x0000,
10685 0x00000000, 0xffffffff },
10687 /* Receive Data and Receive BD Initiator Control Registers. */
10688 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10689 0x00000000, 0xffffffff },
10690 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10691 0x00000000, 0xffffffff },
10692 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10693 0x00000000, 0x00000003 },
10694 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10695 0x00000000, 0xffffffff },
10696 { RCVDBDI_STD_BD+0, 0x0000,
10697 0x00000000, 0xffffffff },
10698 { RCVDBDI_STD_BD+4, 0x0000,
10699 0x00000000, 0xffffffff },
10700 { RCVDBDI_STD_BD+8, 0x0000,
10701 0x00000000, 0xffff0002 },
10702 { RCVDBDI_STD_BD+0xc, 0x0000,
10703 0x00000000, 0xffffffff },
10705 /* Receive BD Initiator Control Registers. */
10706 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10707 0x00000000, 0xffffffff },
10708 { RCVBDI_STD_THRESH, TG3_FL_5705,
10709 0x00000000, 0x000003ff },
10710 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10711 0x00000000, 0xffffffff },
10713 /* Host Coalescing Control Registers. */
10714 { HOSTCC_MODE, TG3_FL_NOT_5705,
10715 0x00000000, 0x00000004 },
10716 { HOSTCC_MODE, TG3_FL_5705,
10717 0x00000000, 0x000000f6 },
10718 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10719 0x00000000, 0xffffffff },
10720 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10721 0x00000000, 0x000003ff },
10722 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10723 0x00000000, 0xffffffff },
10724 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10725 0x00000000, 0x000003ff },
10726 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10727 0x00000000, 0xffffffff },
10728 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10729 0x00000000, 0x000000ff },
10730 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10731 0x00000000, 0xffffffff },
10732 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10733 0x00000000, 0x000000ff },
10734 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10735 0x00000000, 0xffffffff },
10736 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10737 0x00000000, 0xffffffff },
10738 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10739 0x00000000, 0xffffffff },
10740 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10741 0x00000000, 0x000000ff },
10742 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10743 0x00000000, 0xffffffff },
10744 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10745 0x00000000, 0x000000ff },
10746 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10747 0x00000000, 0xffffffff },
10748 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10749 0x00000000, 0xffffffff },
10750 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10751 0x00000000, 0xffffffff },
10752 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10753 0x00000000, 0xffffffff },
10754 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10755 0x00000000, 0xffffffff },
10756 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10757 0xffffffff, 0x00000000 },
10758 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10759 0xffffffff, 0x00000000 },
10761 /* Buffer Manager Control Registers. */
10762 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10763 0x00000000, 0x007fff80 },
10764 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10765 0x00000000, 0x007fffff },
10766 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10767 0x00000000, 0x0000003f },
10768 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10769 0x00000000, 0x000001ff },
10770 { BUFMGR_MB_HIGH_WATER, 0x0000,
10771 0x00000000, 0x000001ff },
10772 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10773 0xffffffff, 0x00000000 },
10774 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10775 0xffffffff, 0x00000000 },
10777 /* Mailbox Registers */
10778 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10779 0x00000000, 0x000001ff },
10780 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10781 0x00000000, 0x000001ff },
10782 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10783 0x00000000, 0x000007ff },
10784 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10785 0x00000000, 0x000001ff },
10787 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10790 is_5705 = is_5750 = 0;
10791 if (tg3_flag(tp, 5705_PLUS)) {
10793 if (tg3_flag(tp, 5750_PLUS))
10797 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10798 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10801 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10804 if (tg3_flag(tp, IS_5788) &&
10805 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10808 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10811 offset = (u32) reg_tbl[i].offset;
10812 read_mask = reg_tbl[i].read_mask;
10813 write_mask = reg_tbl[i].write_mask;
10815 /* Save the original register content */
10816 save_val = tr32(offset);
10818 /* Determine the read-only value. */
10819 read_val = save_val & read_mask;
10821 /* Write zero to the register, then make sure the read-only bits
10822 * are not changed and the read/write bits are all zeros.
10826 val = tr32(offset);
10828 /* Test the read-only and read/write bits. */
10829 if (((val & read_mask) != read_val) || (val & write_mask))
10832 /* Write ones to all the bits defined by RdMask and WrMask, then
10833 * make sure the read-only bits are not changed and the
10834 * read/write bits are all ones.
10836 tw32(offset, read_mask | write_mask);
10838 val = tr32(offset);
10840 /* Test the read-only bits. */
10841 if ((val & read_mask) != read_val)
10844 /* Test the read/write bits. */
10845 if ((val & write_mask) != write_mask)
10848 tw32(offset, save_val);
10854 if (netif_msg_hw(tp))
10855 netdev_err(tp->dev,
10856 "Register test failed at offset %x\n", offset);
10857 tw32(offset, save_val);
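/* A condensed model of the per-register check above (a sketch, not part
 * of the driver): after writing zero, read-only bits must still read back
 * as read_val and read/write bits must be clear; after writing
 * read_mask | write_mask, read-only bits must be unchanged and the
 * read/write bits must all be set.
 */
static int tg3_mask_test_ok_sketch(u32 after_zero, u32 after_ones,
				   u32 read_mask, u32 write_mask,
				   u32 read_val)
{
	if ((after_zero & read_mask) != read_val ||
	    (after_zero & write_mask) != 0)
		return 0;

	if ((after_ones & read_mask) != read_val ||
	    (after_ones & write_mask) != write_mask)
		return 0;

	return 1;
}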
10861 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10863 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10867 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10868 for (j = 0; j < len; j += 4) {
10871 tg3_write_mem(tp, offset + j, test_pattern[i]);
10872 tg3_read_mem(tp, offset + j, &val);
10873 if (val != test_pattern[i])
10880 static int tg3_test_memory(struct tg3 *tp)
10882 static struct mem_entry {
10885 } mem_tbl_570x[] = {
10886 { 0x00000000, 0x00b50},
10887 { 0x00002000, 0x1c000},
10888 { 0xffffffff, 0x00000}
10889 }, mem_tbl_5705[] = {
10890 { 0x00000100, 0x0000c},
10891 { 0x00000200, 0x00008},
10892 { 0x00004000, 0x00800},
10893 { 0x00006000, 0x01000},
10894 { 0x00008000, 0x02000},
10895 { 0x00010000, 0x0e000},
10896 { 0xffffffff, 0x00000}
10897 }, mem_tbl_5755[] = {
10898 { 0x00000200, 0x00008},
10899 { 0x00004000, 0x00800},
10900 { 0x00006000, 0x00800},
10901 { 0x00008000, 0x02000},
10902 { 0x00010000, 0x0c000},
10903 { 0xffffffff, 0x00000}
10904 }, mem_tbl_5906[] = {
10905 { 0x00000200, 0x00008},
10906 { 0x00004000, 0x00400},
10907 { 0x00006000, 0x00400},
10908 { 0x00008000, 0x01000},
10909 { 0x00010000, 0x01000},
10910 { 0xffffffff, 0x00000}
10911 }, mem_tbl_5717[] = {
10912 { 0x00000200, 0x00008},
10913 { 0x00010000, 0x0a000},
10914 { 0x00020000, 0x13c00},
10915 { 0xffffffff, 0x00000}
10916 }, mem_tbl_57765[] = {
10917 { 0x00000200, 0x00008},
10918 { 0x00004000, 0x00800},
10919 { 0x00006000, 0x09800},
10920 { 0x00010000, 0x0a000},
10921 { 0xffffffff, 0x00000}
10923 struct mem_entry *mem_tbl;
10927 if (tg3_flag(tp, 5717_PLUS))
10928 mem_tbl = mem_tbl_5717;
10929 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10930 mem_tbl = mem_tbl_57765;
10931 else if (tg3_flag(tp, 5755_PLUS))
10932 mem_tbl = mem_tbl_5755;
10933 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10934 mem_tbl = mem_tbl_5906;
10935 else if (tg3_flag(tp, 5705_PLUS))
10936 mem_tbl = mem_tbl_5705;
10938 mem_tbl = mem_tbl_570x;
10940 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10941 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10949 #define TG3_MAC_LOOPBACK 0
10950 #define TG3_PHY_LOOPBACK 1
10951 #define TG3_TSO_LOOPBACK 2
10953 #define TG3_TSO_MSS 500
10955 #define TG3_TSO_IP_HDR_LEN 20
10956 #define TG3_TSO_TCP_HDR_LEN 20
10957 #define TG3_TSO_TCP_OPT_LEN 12
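/* The template below decodes as a canned IPv4 + TCP header (the Ethernet
 * header is built separately): version 4, IHL 5, DF set, TTL 64,
 * protocol 6 (TCP), 10.0.0.1 -> 10.0.0.2, then a TCP header with ACK
 * set, a data offset of 8 words (20 bytes plus the 12-byte option area
 * sized by TG3_TSO_TCP_OPT_LEN) and a NOP/NOP/timestamp option filled
 * with 0x11 placeholder bytes.
 */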
10959 static const u8 tg3_tso_header[] = {
10961 0x45, 0x00, 0x00, 0x00,
10962 0x00, 0x00, 0x40, 0x00,
10963 0x40, 0x06, 0x00, 0x00,
10964 0x0a, 0x00, 0x00, 0x01,
10965 0x0a, 0x00, 0x00, 0x02,
10966 0x0d, 0x00, 0xe0, 0x00,
10967 0x00, 0x00, 0x01, 0x00,
10968 0x00, 0x00, 0x02, 0x00,
10969 0x80, 0x10, 0x10, 0x00,
10970 0x14, 0x09, 0x00, 0x00,
10971 0x01, 0x01, 0x08, 0x0a,
10972 0x11, 0x11, 0x11, 0x11,
10973 0x11, 0x11, 0x11, 0x11,
10976 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10978 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10979 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10980 struct sk_buff *skb, *rx_skb;
10983 int num_pkts, tx_len, rx_len, i, err;
10984 struct tg3_rx_buffer_desc *desc;
10985 struct tg3_napi *tnapi, *rnapi;
10986 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10988 tnapi = &tp->napi[0];
10989 rnapi = &tp->napi[0];
10990 if (tp->irq_cnt > 1) {
10991 if (tg3_flag(tp, ENABLE_RSS))
10992 rnapi = &tp->napi[1];
10993 if (tg3_flag(tp, ENABLE_TSS))
10994 tnapi = &tp->napi[1];
10996 coal_now = tnapi->coal_now | rnapi->coal_now;
10998 if (loopback_mode == TG3_MAC_LOOPBACK) {
10999 /* HW errata - mac loopback fails in some cases on 5780.
11000 * Normal traffic and PHY loopback are not affected by
11001 * errata. Also, the MAC loopback test is deprecated for
11002 * all newer ASIC revisions.
11004 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11005 tg3_flag(tp, CPMU_PRESENT))
11008 mac_mode = tp->mac_mode &
11009 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11010 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11011 if (!tg3_flag(tp, 5705_PLUS))
11012 mac_mode |= MAC_MODE_LINK_POLARITY;
11013 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11014 mac_mode |= MAC_MODE_PORT_MODE_MII;
11016 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11017 tw32(MAC_MODE, mac_mode);
11019 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11020 tg3_phy_fet_toggle_apd(tp, false);
11021 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11023 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11025 tg3_phy_toggle_automdix(tp, 0);
11027 tg3_writephy(tp, MII_BMCR, val);
11030 mac_mode = tp->mac_mode &
11031 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11032 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11033 tg3_writephy(tp, MII_TG3_FET_PTEST,
11034 MII_TG3_FET_PTEST_FRC_TX_LINK |
11035 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11036 /* The write needs to be flushed for the AC131 */
11037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11038 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11039 mac_mode |= MAC_MODE_PORT_MODE_MII;
11041 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11043 /* reset to prevent losing 1st rx packet intermittently */
11044 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11045 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11047 tw32_f(MAC_RX_MODE, tp->rx_mode);
11049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11050 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11051 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11052 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11053 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11054 mac_mode |= MAC_MODE_LINK_POLARITY;
11055 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11056 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11058 tw32(MAC_MODE, mac_mode);
11060 /* Wait for link */
11061 for (i = 0; i < 100; i++) {
11062 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11071 skb = netdev_alloc_skb(tp->dev, tx_len);
11075 tx_data = skb_put(skb, tx_len);
11076 memcpy(tx_data, tp->dev->dev_addr, 6);
11077 memset(tx_data + 6, 0x0, 8);
11079 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11081 if (loopback_mode == TG3_TSO_LOOPBACK) {
11082 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11084 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11085 TG3_TSO_TCP_OPT_LEN;
11087 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11088 sizeof(tg3_tso_header));
11091 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11092 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
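		/* e.g. a 1400-byte TSO payload with the 500-byte
		 * TG3_TSO_MSS above segments into
		 * DIV_ROUND_UP(1400, 500) = 3 packets.
		 */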
11094 /* Set the total length field in the IP header */
11095 iph->tot_len = htons((u16)(mss + hdr_len));
11097 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11098 TXD_FLAG_CPU_POST_DMA);
11100 if (tg3_flag(tp, HW_TSO_1) ||
11101 tg3_flag(tp, HW_TSO_2) ||
11102 tg3_flag(tp, HW_TSO_3)) {
11104 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11105 th = (struct tcphdr *)&tx_data[val];
11108 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11110 if (tg3_flag(tp, HW_TSO_3)) {
11111 mss |= (hdr_len & 0xc) << 12;
11112 if (hdr_len & 0x10)
11113 base_flags |= 0x00000010;
11114 base_flags |= (hdr_len & 0x3e0) << 5;
11115 } else if (tg3_flag(tp, HW_TSO_2))
11116 mss |= hdr_len << 9;
11117 else if (tg3_flag(tp, HW_TSO_1) ||
11118 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11119 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11121 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11124 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11127 data_off = ETH_HLEN;
11130 for (i = data_off; i < tx_len; i++)
11131 tx_data[i] = (u8) (i & 0xff);
11133 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11134 if (pci_dma_mapping_error(tp->pdev, map)) {
11135 dev_kfree_skb(skb);
11139 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11144 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11146 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11147 base_flags, (mss << 1) | 1);
11151 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11152 tr32_mailbox(tnapi->prodmbox);
11156 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11157 for (i = 0; i < 35; i++) {
11158 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11163 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11164 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11165 if ((tx_idx == tnapi->tx_prod) &&
11166 (rx_idx == (rx_start_idx + num_pkts)))
11170 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11171 dev_kfree_skb(skb);
11173 if (tx_idx != tnapi->tx_prod)
11176 if (rx_idx != rx_start_idx + num_pkts)
11180 while (rx_idx != rx_start_idx) {
11181 desc = &rnapi->rx_rcb[rx_start_idx++];
11182 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11183 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11185 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11186 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11189 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11192 if (loopback_mode != TG3_TSO_LOOPBACK) {
11193 if (rx_len != tx_len)
11196 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11197 if (opaque_key != RXD_OPAQUE_RING_STD)
11200 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11203 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11204 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11205 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11209 if (opaque_key == RXD_OPAQUE_RING_STD) {
11210 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11211 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11213 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11214 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11215 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11220 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11221 PCI_DMA_FROMDEVICE);
11223 for (i = data_off; i < rx_len; i++, val++) {
11224 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11231 /* tg3_free_rings will unmap and free the rx_skb */
11236 #define TG3_STD_LOOPBACK_FAILED 1
11237 #define TG3_JMB_LOOPBACK_FAILED 2
11238 #define TG3_TSO_LOOPBACK_FAILED 4
11240 #define TG3_MAC_LOOPBACK_SHIFT 0
11241 #define TG3_PHY_LOOPBACK_SHIFT 4
11242 #define TG3_LOOPBACK_FAILED 0x00000077
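/* TG3_LOOPBACK_FAILED packs the three per-test bits at both shifts:
 * (STD | JMB | TSO) << TG3_MAC_LOOPBACK_SHIFT, i.e. 0x07, or'd with
 * (STD | JMB | TSO) << TG3_PHY_LOOPBACK_SHIFT, i.e. 0x70, giving 0x77:
 * "every loopback test failed".
 */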
11244 static int tg3_test_loopback(struct tg3 *tp)
11247 u32 eee_cap, cpmuctrl = 0;
11249 if (!netif_running(tp->dev))
11250 return TG3_LOOPBACK_FAILED;
11252 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11253 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11255 err = tg3_reset_hw(tp, 1);
11257 err = TG3_LOOPBACK_FAILED;
11261 if (tg3_flag(tp, ENABLE_RSS)) {
11264 /* Reroute all rx packets to the 1st queue */
11265 for (i = MAC_RSS_INDIR_TBL_0;
11266 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11270 /* Turn off gphy autopowerdown. */
11271 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11272 tg3_phy_toggle_apd(tp, false);
11274 if (tg3_flag(tp, CPMU_PRESENT)) {
11278 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11280 /* Wait for up to 40 microseconds to acquire lock. */
11281 for (i = 0; i < 4; i++) {
11282 status = tr32(TG3_CPMU_MUTEX_GNT);
11283 if (status == CPMU_MUTEX_GNT_DRIVER)
11288 if (status != CPMU_MUTEX_GNT_DRIVER) {
11289 err = TG3_LOOPBACK_FAILED;
11293 /* Turn off link-based power management. */
11294 cpmuctrl = tr32(TG3_CPMU_CTRL);
11295 tw32(TG3_CPMU_CTRL,
11296 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11297 CPMU_CTRL_LINK_AWARE_MODE));
11300 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11301 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11303 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11304 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11305 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11307 if (tg3_flag(tp, CPMU_PRESENT)) {
11308 tw32(TG3_CPMU_CTRL, cpmuctrl);
11310 /* Release the mutex */
11311 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11314 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11315 !tg3_flag(tp, USE_PHYLIB)) {
11316 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11317 err |= TG3_STD_LOOPBACK_FAILED <<
11318 TG3_PHY_LOOPBACK_SHIFT;
11319 if (tg3_flag(tp, TSO_CAPABLE) &&
11320 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11321 err |= TG3_TSO_LOOPBACK_FAILED <<
11322 TG3_PHY_LOOPBACK_SHIFT;
11323 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11324 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11325 err |= TG3_JMB_LOOPBACK_FAILED <<
11326 TG3_PHY_LOOPBACK_SHIFT;
11329 /* Re-enable gphy autopowerdown. */
11330 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11331 tg3_phy_toggle_apd(tp, true);
11334 tp->phy_flags |= eee_cap;
11339 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11342 struct tg3 *tp = netdev_priv(dev);
11344 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11347 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11349 if (tg3_test_nvram(tp) != 0) {
11350 etest->flags |= ETH_TEST_FL_FAILED;
11353 if (tg3_test_link(tp) != 0) {
11354 etest->flags |= ETH_TEST_FL_FAILED;
11357 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11358 int err, err2 = 0, irq_sync = 0;
11360 if (netif_running(dev)) {
11362 tg3_netif_stop(tp);
11366 tg3_full_lock(tp, irq_sync);
11368 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11369 err = tg3_nvram_lock(tp);
11370 tg3_halt_cpu(tp, RX_CPU_BASE);
11371 if (!tg3_flag(tp, 5705_PLUS))
11372 tg3_halt_cpu(tp, TX_CPU_BASE);
11374 tg3_nvram_unlock(tp);
11376 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11379 if (tg3_test_registers(tp) != 0) {
11380 etest->flags |= ETH_TEST_FL_FAILED;
11383 if (tg3_test_memory(tp) != 0) {
11384 etest->flags |= ETH_TEST_FL_FAILED;
11387 if ((data[4] = tg3_test_loopback(tp)) != 0)
11388 etest->flags |= ETH_TEST_FL_FAILED;
11390 tg3_full_unlock(tp);
11392 if (tg3_test_interrupt(tp) != 0) {
11393 etest->flags |= ETH_TEST_FL_FAILED;
11397 tg3_full_lock(tp, 0);
11399 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11400 if (netif_running(dev)) {
11401 tg3_flag_set(tp, INIT_COMPLETE);
11402 err2 = tg3_restart_hw(tp, 1);
11404 tg3_netif_start(tp);
11407 tg3_full_unlock(tp);
11409 if (irq_sync && !err2)
11412 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11413 tg3_power_down(tp);
11417 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11419 struct mii_ioctl_data *data = if_mii(ifr);
11420 struct tg3 *tp = netdev_priv(dev);
11423 if (tg3_flag(tp, USE_PHYLIB)) {
11424 struct phy_device *phydev;
11425 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11427 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11428 return phy_mii_ioctl(phydev, ifr, cmd);
11433 data->phy_id = tp->phy_addr;
11436 case SIOCGMIIREG: {
11439 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11440 break; /* We have no PHY */
11442 if (!netif_running(dev))
11445 spin_lock_bh(&tp->lock);
11446 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11447 spin_unlock_bh(&tp->lock);
11449 data->val_out = mii_regval;
11455 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11456 break; /* We have no PHY */
11458 if (!netif_running(dev))
11461 spin_lock_bh(&tp->lock);
11462 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11463 spin_unlock_bh(&tp->lock);
11471 return -EOPNOTSUPP;
11474 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11476 struct tg3 *tp = netdev_priv(dev);
11478 memcpy(ec, &tp->coal, sizeof(*ec));
11482 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11484 struct tg3 *tp = netdev_priv(dev);
11485 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11486 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11488 if (!tg3_flag(tp, 5705_PLUS)) {
11489 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11490 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11491 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11492 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11495 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11496 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11497 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11498 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11499 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11500 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11501 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11502 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11503 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11504 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11507 /* No rx interrupts will be generated if both are zero */
11508 if ((ec->rx_coalesce_usecs == 0) &&
11509 (ec->rx_max_coalesced_frames == 0))
11512 /* No tx interrupts will be generated if both are zero */
11513 if ((ec->tx_coalesce_usecs == 0) &&
11514 (ec->tx_max_coalesced_frames == 0))
11517 /* Only copy relevant parameters, ignore all others. */
11518 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11519 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11520 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11521 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11522 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11523 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11524 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11525 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11526 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11528 if (netif_running(dev)) {
11529 tg3_full_lock(tp, 0);
11530 __tg3_set_coalesce(tp, &tp->coal);
11531 tg3_full_unlock(tp);
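/* For reference, the fields validated above map one-to-one onto the
 * ethtool -C parameters. With illustrative values,
 *
 *   ethtool -C eth0 rx-usecs 20 rx-frames 5 \
 *           rx-usecs-irq 20 rx-frames-irq 5 stats-block-usecs 1000000
 *
 * arrives here as ec->rx_coalesce_usecs == 20,
 * ec->rx_max_coalesced_frames == 5, ec->rx_coalesce_usecs_irq == 20,
 * ec->rx_max_coalesced_frames_irq == 5 and
 * ec->stats_block_coalesce_usecs == 1000000.
 */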
11536 static const struct ethtool_ops tg3_ethtool_ops = {
11537 .get_settings = tg3_get_settings,
11538 .set_settings = tg3_set_settings,
11539 .get_drvinfo = tg3_get_drvinfo,
11540 .get_regs_len = tg3_get_regs_len,
11541 .get_regs = tg3_get_regs,
11542 .get_wol = tg3_get_wol,
11543 .set_wol = tg3_set_wol,
11544 .get_msglevel = tg3_get_msglevel,
11545 .set_msglevel = tg3_set_msglevel,
11546 .nway_reset = tg3_nway_reset,
11547 .get_link = ethtool_op_get_link,
11548 .get_eeprom_len = tg3_get_eeprom_len,
11549 .get_eeprom = tg3_get_eeprom,
11550 .set_eeprom = tg3_set_eeprom,
11551 .get_ringparam = tg3_get_ringparam,
11552 .set_ringparam = tg3_set_ringparam,
11553 .get_pauseparam = tg3_get_pauseparam,
11554 .set_pauseparam = tg3_set_pauseparam,
11555 .self_test = tg3_self_test,
11556 .get_strings = tg3_get_strings,
11557 .set_phys_id = tg3_set_phys_id,
11558 .get_ethtool_stats = tg3_get_ethtool_stats,
11559 .get_coalesce = tg3_get_coalesce,
11560 .set_coalesce = tg3_set_coalesce,
11561 .get_sset_count = tg3_get_sset_count,
11564 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11566 u32 cursize, val, magic;
11568 tp->nvram_size = EEPROM_CHIP_SIZE;
11570 if (tg3_nvram_read(tp, 0, &magic) != 0)
11573 if ((magic != TG3_EEPROM_MAGIC) &&
11574 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11575 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11579 * Size the chip by reading offsets at increasing powers of two.
11580 * When we encounter our validation signature, we know the addressing
11581 * has wrapped around, and thus have our chip size.
11585 while (cursize < tp->nvram_size) {
11586 if (tg3_nvram_read(tp, cursize, &val) != 0)
11595 tp->nvram_size = cursize;
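/* A rough walk-through of the sizing loop, assuming a hypothetical
 * 0x20000-byte (128 KB) part: reads at successive power-of-two offsets
 * return ordinary data until cursize reaches the part size, at which
 * point the address wraps back to offset 0 and the read returns the
 * validation signature again, so the loop stops with
 * tp->nvram_size == 0x20000.
 */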
11598 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11602 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11605 /* Selfboot format */
11606 if (val != TG3_EEPROM_MAGIC) {
11607 tg3_get_eeprom_size(tp);
11611 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11613 /* This is confusing. We want to operate on the
11614 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11615 * call will read from NVRAM and byteswap the data
11616 * according to the byteswapping settings for all
11617 * other register accesses. This ensures the data we
11618 * want will always reside in the lower 16 bits.
11619 * However, the data in NVRAM is in LE format, which
11620 * means the data from the NVRAM read will always be
11621 * opposite the endianness of the CPU. The 16-bit
11622 * byteswap then brings the data to CPU endianness.
11624 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11628 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
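/* Worked example with hypothetical NVRAM contents: if the low 16 bits
 * of the dword read at 0xf0 come back as 0x0002, then
 * swab16(0x0002) == 0x0200 == 512, and tp->nvram_size becomes
 * 512 * 1024 bytes, i.e. a 512 KB selfboot image.
 */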
11631 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11635 nvcfg1 = tr32(NVRAM_CFG1);
11636 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11637 tg3_flag_set(tp, FLASH);
11639 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11640 tw32(NVRAM_CFG1, nvcfg1);
11643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11644 tg3_flag(tp, 5780_CLASS)) {
11645 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11646 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11647 tp->nvram_jedecnum = JEDEC_ATMEL;
11648 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11649 tg3_flag_set(tp, NVRAM_BUFFERED);
11651 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11652 tp->nvram_jedecnum = JEDEC_ATMEL;
11653 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11655 case FLASH_VENDOR_ATMEL_EEPROM:
11656 tp->nvram_jedecnum = JEDEC_ATMEL;
11657 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11658 tg3_flag_set(tp, NVRAM_BUFFERED);
11660 case FLASH_VENDOR_ST:
11661 tp->nvram_jedecnum = JEDEC_ST;
11662 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11663 tg3_flag_set(tp, NVRAM_BUFFERED);
11665 case FLASH_VENDOR_SAIFUN:
11666 tp->nvram_jedecnum = JEDEC_SAIFUN;
11667 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11669 case FLASH_VENDOR_SST_SMALL:
11670 case FLASH_VENDOR_SST_LARGE:
11671 tp->nvram_jedecnum = JEDEC_SST;
11672 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11676 tp->nvram_jedecnum = JEDEC_ATMEL;
11677 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11678 tg3_flag_set(tp, NVRAM_BUFFERED);
11682 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11684 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11685 case FLASH_5752PAGE_SIZE_256:
11686 tp->nvram_pagesize = 256;
11688 case FLASH_5752PAGE_SIZE_512:
11689 tp->nvram_pagesize = 512;
11691 case FLASH_5752PAGE_SIZE_1K:
11692 tp->nvram_pagesize = 1024;
11694 case FLASH_5752PAGE_SIZE_2K:
11695 tp->nvram_pagesize = 2048;
11697 case FLASH_5752PAGE_SIZE_4K:
11698 tp->nvram_pagesize = 4096;
11700 case FLASH_5752PAGE_SIZE_264:
11701 tp->nvram_pagesize = 264;
11703 case FLASH_5752PAGE_SIZE_528:
11704 tp->nvram_pagesize = 528;
11709 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11713 nvcfg1 = tr32(NVRAM_CFG1);
11715 /* NVRAM protection for TPM */
11716 if (nvcfg1 & (1 << 27))
11717 tg3_flag_set(tp, PROTECTED_NVRAM);
11719 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11720 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11721 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11722 tp->nvram_jedecnum = JEDEC_ATMEL;
11723 tg3_flag_set(tp, NVRAM_BUFFERED);
11725 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11726 tp->nvram_jedecnum = JEDEC_ATMEL;
11727 tg3_flag_set(tp, NVRAM_BUFFERED);
11728 tg3_flag_set(tp, FLASH);
11730 case FLASH_5752VENDOR_ST_M45PE10:
11731 case FLASH_5752VENDOR_ST_M45PE20:
11732 case FLASH_5752VENDOR_ST_M45PE40:
11733 tp->nvram_jedecnum = JEDEC_ST;
11734 tg3_flag_set(tp, NVRAM_BUFFERED);
11735 tg3_flag_set(tp, FLASH);
11739 if (tg3_flag(tp, FLASH)) {
11740 tg3_nvram_get_pagesize(tp, nvcfg1);
11742 /* For eeprom, set pagesize to maximum eeprom size */
11743 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11745 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11746 tw32(NVRAM_CFG1, nvcfg1);
11750 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11752 u32 nvcfg1, protect = 0;
11754 nvcfg1 = tr32(NVRAM_CFG1);
11756 /* NVRAM protection for TPM */
11757 if (nvcfg1 & (1 << 27)) {
11758 tg3_flag_set(tp, PROTECTED_NVRAM);
11762 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11764 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11765 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11766 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11767 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11768 tp->nvram_jedecnum = JEDEC_ATMEL;
11769 tg3_flag_set(tp, NVRAM_BUFFERED);
11770 tg3_flag_set(tp, FLASH);
11771 tp->nvram_pagesize = 264;
11772 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11773 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11774 tp->nvram_size = (protect ? 0x3e200 :
11775 TG3_NVRAM_SIZE_512KB);
11776 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11777 tp->nvram_size = (protect ? 0x1f200 :
11778 TG3_NVRAM_SIZE_256KB);
11780 tp->nvram_size = (protect ? 0x1f200 :
11781 TG3_NVRAM_SIZE_128KB);
11783 case FLASH_5752VENDOR_ST_M45PE10:
11784 case FLASH_5752VENDOR_ST_M45PE20:
11785 case FLASH_5752VENDOR_ST_M45PE40:
11786 tp->nvram_jedecnum = JEDEC_ST;
11787 tg3_flag_set(tp, NVRAM_BUFFERED);
11788 tg3_flag_set(tp, FLASH);
11789 tp->nvram_pagesize = 256;
11790 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11791 tp->nvram_size = (protect ?
11792 TG3_NVRAM_SIZE_64KB :
11793 TG3_NVRAM_SIZE_128KB);
11794 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11795 tp->nvram_size = (protect ?
11796 TG3_NVRAM_SIZE_64KB :
11797 TG3_NVRAM_SIZE_256KB);
11799 tp->nvram_size = (protect ?
11800 TG3_NVRAM_SIZE_128KB :
11801 TG3_NVRAM_SIZE_512KB);
11806 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11810 nvcfg1 = tr32(NVRAM_CFG1);
11812 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11813 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11814 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11815 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11816 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11817 tp->nvram_jedecnum = JEDEC_ATMEL;
11818 tg3_flag_set(tp, NVRAM_BUFFERED);
11819 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11821 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11822 tw32(NVRAM_CFG1, nvcfg1);
11824 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11825 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11826 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11827 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11828 tp->nvram_jedecnum = JEDEC_ATMEL;
11829 tg3_flag_set(tp, NVRAM_BUFFERED);
11830 tg3_flag_set(tp, FLASH);
11831 tp->nvram_pagesize = 264;
11833 case FLASH_5752VENDOR_ST_M45PE10:
11834 case FLASH_5752VENDOR_ST_M45PE20:
11835 case FLASH_5752VENDOR_ST_M45PE40:
11836 tp->nvram_jedecnum = JEDEC_ST;
11837 tg3_flag_set(tp, NVRAM_BUFFERED);
11838 tg3_flag_set(tp, FLASH);
11839 tp->nvram_pagesize = 256;
11844 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11846 u32 nvcfg1, protect = 0;
11848 nvcfg1 = tr32(NVRAM_CFG1);
11850 /* NVRAM protection for TPM */
11851 if (nvcfg1 & (1 << 27)) {
11852 tg3_flag_set(tp, PROTECTED_NVRAM);
11856 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11858 case FLASH_5761VENDOR_ATMEL_ADB021D:
11859 case FLASH_5761VENDOR_ATMEL_ADB041D:
11860 case FLASH_5761VENDOR_ATMEL_ADB081D:
11861 case FLASH_5761VENDOR_ATMEL_ADB161D:
11862 case FLASH_5761VENDOR_ATMEL_MDB021D:
11863 case FLASH_5761VENDOR_ATMEL_MDB041D:
11864 case FLASH_5761VENDOR_ATMEL_MDB081D:
11865 case FLASH_5761VENDOR_ATMEL_MDB161D:
11866 tp->nvram_jedecnum = JEDEC_ATMEL;
11867 tg3_flag_set(tp, NVRAM_BUFFERED);
11868 tg3_flag_set(tp, FLASH);
11869 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11870 tp->nvram_pagesize = 256;
11872 case FLASH_5761VENDOR_ST_A_M45PE20:
11873 case FLASH_5761VENDOR_ST_A_M45PE40:
11874 case FLASH_5761VENDOR_ST_A_M45PE80:
11875 case FLASH_5761VENDOR_ST_A_M45PE16:
11876 case FLASH_5761VENDOR_ST_M_M45PE20:
11877 case FLASH_5761VENDOR_ST_M_M45PE40:
11878 case FLASH_5761VENDOR_ST_M_M45PE80:
11879 case FLASH_5761VENDOR_ST_M_M45PE16:
11880 tp->nvram_jedecnum = JEDEC_ST;
11881 tg3_flag_set(tp, NVRAM_BUFFERED);
11882 tg3_flag_set(tp, FLASH);
11883 tp->nvram_pagesize = 256;
11888 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11891 case FLASH_5761VENDOR_ATMEL_ADB161D:
11892 case FLASH_5761VENDOR_ATMEL_MDB161D:
11893 case FLASH_5761VENDOR_ST_A_M45PE16:
11894 case FLASH_5761VENDOR_ST_M_M45PE16:
11895 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11897 case FLASH_5761VENDOR_ATMEL_ADB081D:
11898 case FLASH_5761VENDOR_ATMEL_MDB081D:
11899 case FLASH_5761VENDOR_ST_A_M45PE80:
11900 case FLASH_5761VENDOR_ST_M_M45PE80:
11901 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11903 case FLASH_5761VENDOR_ATMEL_ADB041D:
11904 case FLASH_5761VENDOR_ATMEL_MDB041D:
11905 case FLASH_5761VENDOR_ST_A_M45PE40:
11906 case FLASH_5761VENDOR_ST_M_M45PE40:
11907 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11909 case FLASH_5761VENDOR_ATMEL_ADB021D:
11910 case FLASH_5761VENDOR_ATMEL_MDB021D:
11911 case FLASH_5761VENDOR_ST_A_M45PE20:
11912 case FLASH_5761VENDOR_ST_M_M45PE20:
11913 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11919 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11921 tp->nvram_jedecnum = JEDEC_ATMEL;
11922 tg3_flag_set(tp, NVRAM_BUFFERED);
11923 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11926 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11930 nvcfg1 = tr32(NVRAM_CFG1);
11932 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11933 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11934 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11935 tp->nvram_jedecnum = JEDEC_ATMEL;
11936 tg3_flag_set(tp, NVRAM_BUFFERED);
11937 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11939 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11940 tw32(NVRAM_CFG1, nvcfg1);
11942 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11943 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11944 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11945 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11946 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11947 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11948 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11949 tp->nvram_jedecnum = JEDEC_ATMEL;
11950 tg3_flag_set(tp, NVRAM_BUFFERED);
11951 tg3_flag_set(tp, FLASH);
11953 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11954 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11955 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11956 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11957 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11959 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11960 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11961 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11963 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11964 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11965 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11969 case FLASH_5752VENDOR_ST_M45PE10:
11970 case FLASH_5752VENDOR_ST_M45PE20:
11971 case FLASH_5752VENDOR_ST_M45PE40:
11972 tp->nvram_jedecnum = JEDEC_ST;
11973 tg3_flag_set(tp, NVRAM_BUFFERED);
11974 tg3_flag_set(tp, FLASH);
11976 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11977 case FLASH_5752VENDOR_ST_M45PE10:
11978 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11980 case FLASH_5752VENDOR_ST_M45PE20:
11981 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11983 case FLASH_5752VENDOR_ST_M45PE40:
11984 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11989 tg3_flag_set(tp, NO_NVRAM);
11993 tg3_nvram_get_pagesize(tp, nvcfg1);
11994 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11995 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11999 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12003 nvcfg1 = tr32(NVRAM_CFG1);
12005 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12006 case FLASH_5717VENDOR_ATMEL_EEPROM:
12007 case FLASH_5717VENDOR_MICRO_EEPROM:
12008 tp->nvram_jedecnum = JEDEC_ATMEL;
12009 tg3_flag_set(tp, NVRAM_BUFFERED);
12010 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12012 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12013 tw32(NVRAM_CFG1, nvcfg1);
12015 case FLASH_5717VENDOR_ATMEL_MDB011D:
12016 case FLASH_5717VENDOR_ATMEL_ADB011B:
12017 case FLASH_5717VENDOR_ATMEL_ADB011D:
12018 case FLASH_5717VENDOR_ATMEL_MDB021D:
12019 case FLASH_5717VENDOR_ATMEL_ADB021B:
12020 case FLASH_5717VENDOR_ATMEL_ADB021D:
12021 case FLASH_5717VENDOR_ATMEL_45USPT:
12022 tp->nvram_jedecnum = JEDEC_ATMEL;
12023 tg3_flag_set(tp, NVRAM_BUFFERED);
12024 tg3_flag_set(tp, FLASH);
12026 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12027 case FLASH_5717VENDOR_ATMEL_MDB021D:
12028 /* Detect size with tg3_nvram_get_size() */
12030 case FLASH_5717VENDOR_ATMEL_ADB021B:
12031 case FLASH_5717VENDOR_ATMEL_ADB021D:
12032 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12035 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12039 case FLASH_5717VENDOR_ST_M_M25PE10:
12040 case FLASH_5717VENDOR_ST_A_M25PE10:
12041 case FLASH_5717VENDOR_ST_M_M45PE10:
12042 case FLASH_5717VENDOR_ST_A_M45PE10:
12043 case FLASH_5717VENDOR_ST_M_M25PE20:
12044 case FLASH_5717VENDOR_ST_A_M25PE20:
12045 case FLASH_5717VENDOR_ST_M_M45PE20:
12046 case FLASH_5717VENDOR_ST_A_M45PE20:
12047 case FLASH_5717VENDOR_ST_25USPT:
12048 case FLASH_5717VENDOR_ST_45USPT:
12049 tp->nvram_jedecnum = JEDEC_ST;
12050 tg3_flag_set(tp, NVRAM_BUFFERED);
12051 tg3_flag_set(tp, FLASH);
12053 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12054 case FLASH_5717VENDOR_ST_M_M25PE20:
12055 case FLASH_5717VENDOR_ST_M_M45PE20:
12056 /* Detect size with tg3_nvram_get_size() */
12058 case FLASH_5717VENDOR_ST_A_M25PE20:
12059 case FLASH_5717VENDOR_ST_A_M45PE20:
12060 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12063 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12068 tg3_flag_set(tp, NO_NVRAM);
12072 tg3_nvram_get_pagesize(tp, nvcfg1);
12073 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12074 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12077 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12079 u32 nvcfg1, nvmpinstrp;
12081 nvcfg1 = tr32(NVRAM_CFG1);
12082 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12084 switch (nvmpinstrp) {
12085 case FLASH_5720_EEPROM_HD:
12086 case FLASH_5720_EEPROM_LD:
12087 tp->nvram_jedecnum = JEDEC_ATMEL;
12088 tg3_flag_set(tp, NVRAM_BUFFERED);
12090 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12091 tw32(NVRAM_CFG1, nvcfg1);
12092 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12093 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12095 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12097 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12098 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12099 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12100 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12101 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12102 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12103 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12104 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12105 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12106 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12107 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12108 case FLASH_5720VENDOR_ATMEL_45USPT:
12109 tp->nvram_jedecnum = JEDEC_ATMEL;
12110 tg3_flag_set(tp, NVRAM_BUFFERED);
12111 tg3_flag_set(tp, FLASH);
12113 switch (nvmpinstrp) {
12114 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12115 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12116 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12117 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12119 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12120 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12121 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12122 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12124 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12125 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12126 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12129 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12133 case FLASH_5720VENDOR_M_ST_M25PE10:
12134 case FLASH_5720VENDOR_M_ST_M45PE10:
12135 case FLASH_5720VENDOR_A_ST_M25PE10:
12136 case FLASH_5720VENDOR_A_ST_M45PE10:
12137 case FLASH_5720VENDOR_M_ST_M25PE20:
12138 case FLASH_5720VENDOR_M_ST_M45PE20:
12139 case FLASH_5720VENDOR_A_ST_M25PE20:
12140 case FLASH_5720VENDOR_A_ST_M45PE20:
12141 case FLASH_5720VENDOR_M_ST_M25PE40:
12142 case FLASH_5720VENDOR_M_ST_M45PE40:
12143 case FLASH_5720VENDOR_A_ST_M25PE40:
12144 case FLASH_5720VENDOR_A_ST_M45PE40:
12145 case FLASH_5720VENDOR_M_ST_M25PE80:
12146 case FLASH_5720VENDOR_M_ST_M45PE80:
12147 case FLASH_5720VENDOR_A_ST_M25PE80:
12148 case FLASH_5720VENDOR_A_ST_M45PE80:
12149 case FLASH_5720VENDOR_ST_25USPT:
12150 case FLASH_5720VENDOR_ST_45USPT:
12151 tp->nvram_jedecnum = JEDEC_ST;
12152 tg3_flag_set(tp, NVRAM_BUFFERED);
12153 tg3_flag_set(tp, FLASH);
12155 switch (nvmpinstrp) {
12156 case FLASH_5720VENDOR_M_ST_M25PE20:
12157 case FLASH_5720VENDOR_M_ST_M45PE20:
12158 case FLASH_5720VENDOR_A_ST_M25PE20:
12159 case FLASH_5720VENDOR_A_ST_M45PE20:
12160 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12162 case FLASH_5720VENDOR_M_ST_M25PE40:
12163 case FLASH_5720VENDOR_M_ST_M45PE40:
12164 case FLASH_5720VENDOR_A_ST_M25PE40:
12165 case FLASH_5720VENDOR_A_ST_M45PE40:
12166 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12168 case FLASH_5720VENDOR_M_ST_M25PE80:
12169 case FLASH_5720VENDOR_M_ST_M45PE80:
12170 case FLASH_5720VENDOR_A_ST_M25PE80:
12171 case FLASH_5720VENDOR_A_ST_M45PE80:
12172 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12175 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12180 tg3_flag_set(tp, NO_NVRAM);
12184 tg3_nvram_get_pagesize(tp, nvcfg1);
12185 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12186 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12189 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12190 static void __devinit tg3_nvram_init(struct tg3 *tp)
12192 tw32_f(GRC_EEPROM_ADDR,
12193 (EEPROM_ADDR_FSM_RESET |
12194 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12195 EEPROM_ADDR_CLKPERD_SHIFT)));
12199 /* Enable seeprom accesses. */
12200 tw32_f(GRC_LOCAL_CTRL,
12201 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12204 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12205 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12206 tg3_flag_set(tp, NVRAM);
12208 if (tg3_nvram_lock(tp)) {
12209 netdev_warn(tp->dev,
12210 "Cannot get nvram lock, %s failed\n",
12214 tg3_enable_nvram_access(tp);
12216 tp->nvram_size = 0;
12218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12219 tg3_get_5752_nvram_info(tp);
12220 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12221 tg3_get_5755_nvram_info(tp);
12222 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12225 tg3_get_5787_nvram_info(tp);
12226 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12227 tg3_get_5761_nvram_info(tp);
12228 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12229 tg3_get_5906_nvram_info(tp);
12230 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12231 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12232 tg3_get_57780_nvram_info(tp);
12233 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12235 tg3_get_5717_nvram_info(tp);
12236 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12237 tg3_get_5720_nvram_info(tp);
12239 tg3_get_nvram_info(tp);
12241 if (tp->nvram_size == 0)
12242 tg3_get_nvram_size(tp);
12244 tg3_disable_nvram_access(tp);
12245 tg3_nvram_unlock(tp);
12248 tg3_flag_clear(tp, NVRAM);
12249 tg3_flag_clear(tp, NVRAM_BUFFERED);
12251 tg3_get_eeprom_size(tp);
12255 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12256 u32 offset, u32 len, u8 *buf)
12261 for (i = 0; i < len; i += 4) {
12267 memcpy(&data, buf + i, 4);
12270 * The SEEPROM interface expects the data to always be opposite
12271 * the native endian format. We accomplish this by reversing
12272 * all the operations that would have been performed on the
12273 * data from a call to tg3_nvram_read_be32().
12275 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
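/* Note that swab32(be32_to_cpu(x)) is equivalent to treating the
 * buffer bytes as little endian, i.e. le32_to_cpu((__force __le32)x):
 * on a little-endian CPU the two swaps cancel and the raw bytes go
 * out unchanged, while on a big-endian CPU the net effect is one
 * byte swap.
 */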
12277 val = tr32(GRC_EEPROM_ADDR);
12278 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12280 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12282 tw32(GRC_EEPROM_ADDR, val |
12283 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12284 (addr & EEPROM_ADDR_ADDR_MASK) |
12285 EEPROM_ADDR_START |
12286 EEPROM_ADDR_WRITE);
12288 for (j = 0; j < 1000; j++) {
12289 val = tr32(GRC_EEPROM_ADDR);
12291 if (val & EEPROM_ADDR_COMPLETE)
12295 if (!(val & EEPROM_ADDR_COMPLETE)) {
12304 /* offset and length are dword aligned */
12305 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12309 u32 pagesize = tp->nvram_pagesize;
12310 u32 pagemask = pagesize - 1;
12314 tmp = kmalloc(pagesize, GFP_KERNEL);
12320 u32 phy_addr, page_off, size;
12322 phy_addr = offset & ~pagemask;
12324 for (j = 0; j < pagesize; j += 4) {
12325 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12326 (__be32 *) (tmp + j));
12333 page_off = offset & pagemask;
12340 memcpy(tmp + page_off, buf, size);
12342 offset = offset + (pagesize - page_off);
12344 tg3_enable_nvram_access(tp);
12347 * Before we can erase the flash page, we need
12348 * to issue a special "write enable" command.
12350 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12352 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12355 /* Erase the target page */
12356 tw32(NVRAM_ADDR, phy_addr);
12358 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12359 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12361 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12364 /* Issue another write enable to start the write. */
12365 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12367 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12370 for (j = 0; j < pagesize; j += 4) {
12373 data = *((__be32 *) (tmp + j));
12375 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12377 tw32(NVRAM_ADDR, phy_addr + j);
12379 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12383 nvram_cmd |= NVRAM_CMD_FIRST;
12384 else if (j == (pagesize - 4))
12385 nvram_cmd |= NVRAM_CMD_LAST;
12387 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12394 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12395 tg3_nvram_exec_cmd(tp, nvram_cmd);
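/* In short, each page touched above goes through a full
 * read-modify-write cycle, roughly:
 *
 *   read page into tmp                (tg3_nvram_read_be32 loop)
 *   merge caller data at page_off     (memcpy)
 *   WREN, then page erase             (NVRAM_CMD_ERASE)
 *   WREN, then program the page       (one NVRAM_CMD_WR per dword,
 *                                      FIRST on the first dword,
 *                                      LAST on the final one)
 *   WRDI                              (drop write enable when done)
 */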
12402 /* offset and length are dword aligned */
12403 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12408 for (i = 0; i < len; i += 4, offset += 4) {
12409 u32 page_off, phy_addr, nvram_cmd;
12412 memcpy(&data, buf + i, 4);
12413 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12415 page_off = offset % tp->nvram_pagesize;
12417 phy_addr = tg3_nvram_phys_addr(tp, offset);
12419 tw32(NVRAM_ADDR, phy_addr);
12421 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12423 if (page_off == 0 || i == 0)
12424 nvram_cmd |= NVRAM_CMD_FIRST;
12425 if (page_off == (tp->nvram_pagesize - 4))
12426 nvram_cmd |= NVRAM_CMD_LAST;
12428 if (i == (len - 4))
12429 nvram_cmd |= NVRAM_CMD_LAST;
12431 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12432 !tg3_flag(tp, 5755_PLUS) &&
12433 (tp->nvram_jedecnum == JEDEC_ST) &&
12434 (nvram_cmd & NVRAM_CMD_FIRST)) {
12436 if ((ret = tg3_nvram_exec_cmd(tp,
12437 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12442 if (!tg3_flag(tp, FLASH)) {
12443 /* We always do complete word writes to eeprom. */
12444 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12447 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
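/* Example of the FIRST/LAST bracketing, assuming a hypothetical
 * 264-byte page: a request starting at page_off 0 tags its first
 * dword with NVRAM_CMD_FIRST, the dword at page_off 260
 * (pagesize - 4) with NVRAM_CMD_LAST, and the final dword of the
 * request (i == len - 4) with NVRAM_CMD_LAST as well, so every page
 * burst is properly bracketed.
 */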
12453 /* offset and length are dword aligned */
12454 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12458 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12459 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12460 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12464 if (!tg3_flag(tp, NVRAM)) {
12465 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12469 ret = tg3_nvram_lock(tp);
12473 tg3_enable_nvram_access(tp);
12474 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12475 tw32(NVRAM_WRITE1, 0x406);
12477 grc_mode = tr32(GRC_MODE);
12478 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12480 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12481 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12484 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12488 grc_mode = tr32(GRC_MODE);
12489 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12491 tg3_disable_nvram_access(tp);
12492 tg3_nvram_unlock(tp);
12495 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12496 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
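/* Write-path selection in brief: parts without the NVRAM interface go
 * through the GRC_EEPROM_* registers
 * (tg3_nvram_write_block_using_eeprom); NVRAM-attached parts then
 * split on whether the controller buffers pages (NVRAM_BUFFERED, or a
 * non-FLASH eeprom) or the driver must do the page read-modify-write
 * itself (unbuffered). When EEPROM_WRITE_PROT is set, GPIO OUTPUT1 is
 * toggled to lift hardware write protect around the whole operation.
 */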
12503 struct subsys_tbl_ent {
12504 u16 subsys_vendor, subsys_devid;
12508 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12509 /* Broadcom boards. */
12510 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12511 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12512 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12513 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12514 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12515 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12516 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12517 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12518 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12519 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12520 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12521 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12522 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12523 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12524 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12525 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12526 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12527 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12528 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12529 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12530 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12531 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12534 { TG3PCI_SUBVENDOR_ID_3COM,
12535 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12536 { TG3PCI_SUBVENDOR_ID_3COM,
12537 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12538 { TG3PCI_SUBVENDOR_ID_3COM,
12539 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12540 { TG3PCI_SUBVENDOR_ID_3COM,
12541 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12542 { TG3PCI_SUBVENDOR_ID_3COM,
12543 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12546 { TG3PCI_SUBVENDOR_ID_DELL,
12547 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12548 { TG3PCI_SUBVENDOR_ID_DELL,
12549 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12550 { TG3PCI_SUBVENDOR_ID_DELL,
12551 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12552 { TG3PCI_SUBVENDOR_ID_DELL,
12553 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12555 /* Compaq boards. */
12556 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12557 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12558 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12559 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12560 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12561 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12562 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12563 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12564 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12565 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12568 { TG3PCI_SUBVENDOR_ID_IBM,
12569 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12572 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12576 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12577 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12578 tp->pdev->subsystem_vendor) &&
12579 (subsys_id_to_phy_id[i].subsys_devid ==
12580 tp->pdev->subsystem_device))
12581 return &subsys_id_to_phy_id[i];
12586 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12591 /* On some early chips the SRAM cannot be accessed in D3hot state,
12592 * so we need to make sure we're in D0.
12594 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12595 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12596 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12599 /* Make sure register accesses (indirect or otherwise)
12600 * will function correctly.
12602 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12603 tp->misc_host_ctrl);
12605 /* The memory arbiter has to be enabled in order for SRAM accesses
12606 * to succeed. Normally on powerup the tg3 chip firmware will make
12607 * sure it is enabled, but other entities such as system netboot
12608 * code might disable it.
12610 val = tr32(MEMARB_MODE);
12611 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12613 tp->phy_id = TG3_PHY_ID_INVALID;
12614 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12616 /* Assume an onboard, WOL-capable device by default. */
12617 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12618 tg3_flag_set(tp, WOL_CAP);
12620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12621 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12622 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12623 tg3_flag_set(tp, IS_NIC);
12625 val = tr32(VCPU_CFGSHDW);
12626 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12627 tg3_flag_set(tp, ASPM_WORKAROUND);
12628 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12629 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12630 tg3_flag_set(tp, WOL_ENABLE);
12631 device_set_wakeup_enable(&tp->pdev->dev, true);
12636 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12637 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12638 u32 nic_cfg, led_cfg;
12639 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12640 int eeprom_phy_serdes = 0;
12642 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12643 tp->nic_sram_data_cfg = nic_cfg;
12645 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12646 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12647 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12648 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12649 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12650 (ver > 0) && (ver < 0x100))
12651 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12654 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12656 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12657 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12658 eeprom_phy_serdes = 1;
12660 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12661 if (nic_phy_id != 0) {
12662 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12663 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12665 eeprom_phy_id = (id1 >> 16) << 10;
12666 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12667 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12671 tp->phy_id = eeprom_phy_id;
12672 if (eeprom_phy_serdes) {
12673 if (!tg3_flag(tp, 5705_PLUS))
12674 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12676 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12679 if (tg3_flag(tp, 5750_PLUS))
12680 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12681 SHASTA_EXT_LED_MODE_MASK);
12683 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12687 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12688 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12691 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12692 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12695 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12696 tp->led_ctrl = LED_CTRL_MODE_MAC;
12698 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12699 * read, as happens with some older 5700/5701 bootcode.
12701 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12703 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12705 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12709 case SHASTA_EXT_LED_SHARED:
12710 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12711 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12712 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12713 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12714 LED_CTRL_MODE_PHY_2);
12717 case SHASTA_EXT_LED_MAC:
12718 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12721 case SHASTA_EXT_LED_COMBO:
12722 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12723 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12724 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12725 LED_CTRL_MODE_PHY_2);
12730 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12732 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12733 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12735 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12736 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12738 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12739 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12740 if ((tp->pdev->subsystem_vendor ==
12741 PCI_VENDOR_ID_ARIMA) &&
12742 (tp->pdev->subsystem_device == 0x205a ||
12743 tp->pdev->subsystem_device == 0x2063))
12744 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12746 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12747 tg3_flag_set(tp, IS_NIC);
12750 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12751 tg3_flag_set(tp, ENABLE_ASF);
12752 if (tg3_flag(tp, 5750_PLUS))
12753 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12756 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12757 tg3_flag(tp, 5750_PLUS))
12758 tg3_flag_set(tp, ENABLE_APE);
12760 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12761 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12762 tg3_flag_clear(tp, WOL_CAP);
12764 if (tg3_flag(tp, WOL_CAP) &&
12765 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12766 tg3_flag_set(tp, WOL_ENABLE);
12767 device_set_wakeup_enable(&tp->pdev->dev, true);
12770 if (cfg2 & (1 << 17))
12771 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12773 /* serdes signal pre-emphasis in register 0x590 is set by the
12774 * bootcode if bit 18 is set */
12775 if (cfg2 & (1 << 18))
12776 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12778 if ((tg3_flag(tp, 57765_PLUS) ||
12779 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12780 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12781 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12782 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12784 if (tg3_flag(tp, PCI_EXPRESS) &&
12785 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12786 !tg3_flag(tp, 57765_PLUS)) {
12789 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12790 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12791 tg3_flag_set(tp, ASPM_WORKAROUND);
12794 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12795 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12796 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12797 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12798 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12799 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12802 if (tg3_flag(tp, WOL_CAP))
12803 device_set_wakeup_enable(&tp->pdev->dev,
12804 tg3_flag(tp, WOL_ENABLE));
12806 device_set_wakeup_capable(&tp->pdev->dev, false);
12809 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12814 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12815 tw32(OTP_CTRL, cmd);
12817 /* Wait for up to 1 ms for command to execute. */
12818 for (i = 0; i < 100; i++) {
12819 val = tr32(OTP_STATUS);
12820 if (val & OTP_STATUS_CMD_DONE)
12825 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12828 /* Read the gphy configuration from the OTP region of the chip. The gphy
12829 * configuration is a 32-bit value that straddles the alignment boundary.
12830 * We do two 32-bit reads and then shift and merge the results.
12832 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12834 u32 bhalf_otp, thalf_otp;
12836 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12838 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12841 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12843 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12846 thalf_otp = tr32(OTP_READ_DATA);
12848 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12850 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12853 bhalf_otp = tr32(OTP_READ_DATA);
12855 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
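/* Merge example with hypothetical OTP words: if the two reads return
 * thalf_otp == 0xAAAABBBB and bhalf_otp == 0xCCCCDDDD, the straddling
 * gphy config comes out as
 * ((0xAAAABBBB & 0x0000ffff) << 16) | (0xCCCCDDDD >> 16)
 *   == 0xBBBBCCCC.
 */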
12858 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12860 u32 adv = ADVERTISED_Autoneg |
12863 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12864 adv |= ADVERTISED_1000baseT_Half |
12865 ADVERTISED_1000baseT_Full;
12867 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12868 adv |= ADVERTISED_100baseT_Half |
12869 ADVERTISED_100baseT_Full |
12870 ADVERTISED_10baseT_Half |
12871 ADVERTISED_10baseT_Full |
12874 adv |= ADVERTISED_FIBRE;
12876 tp->link_config.advertising = adv;
12877 tp->link_config.speed = SPEED_INVALID;
12878 tp->link_config.duplex = DUPLEX_INVALID;
12879 tp->link_config.autoneg = AUTONEG_ENABLE;
12880 tp->link_config.active_speed = SPEED_INVALID;
12881 tp->link_config.active_duplex = DUPLEX_INVALID;
12882 tp->link_config.orig_speed = SPEED_INVALID;
12883 tp->link_config.orig_duplex = DUPLEX_INVALID;
12884 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12887 static int __devinit tg3_phy_probe(struct tg3 *tp)
12889 u32 hw_phy_id_1, hw_phy_id_2;
12890 u32 hw_phy_id, hw_phy_id_masked;
12893 /* flow control autonegotiation is default behavior */
12894 tg3_flag_set(tp, PAUSE_AUTONEG);
12895 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12897 if (tg3_flag(tp, USE_PHYLIB))
12898 return tg3_phy_init(tp);
12900 /* Reading the PHY ID register can conflict with ASF
12901 * firmware access to the PHY hardware.
12904 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12905 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12907 /* Now read the physical PHY_ID from the chip and verify
12908 * that it is sane. If it doesn't look good, we fall back
12909 * to either the hard-coded, table-based PHY_ID or, failing
12910 * that, the value found in the eeprom area.
12912 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12913 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12915 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12916 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12917 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12919 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
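/* With hypothetical register values MII_PHYSID1 == 0x0020 and
 * MII_PHYSID2 == 0x60b0, the packing above yields
 * (0x0020 << 10) | ((0x60b0 & 0xfc00) << 16) | (0x60b0 & 0x03ff)
 *   == 0x00008000 | 0x60000000 | 0x000000b0 == 0x600080b0.
 */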
12922 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12923 tp->phy_id = hw_phy_id;
12924 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12925 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12927 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12929 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12930 /* Do nothing, phy ID already set up in
12931 * tg3_get_eeprom_hw_cfg().
12934 struct subsys_tbl_ent *p;
12936 /* No eeprom signature? Try the hardcoded
12937 * subsys device table.
12939 p = tg3_lookup_by_subsys(tp);
12943 tp->phy_id = p->phy_id;
12944 if (!tp->phy_id ||
12945 tp->phy_id == TG3_PHY_ID_BCM8002)
12946 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12950 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12951 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12952 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12953 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12954 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12955 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12957 tg3_phy_init_link_config(tp);
12959 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12960 !tg3_flag(tp, ENABLE_APE) &&
12961 !tg3_flag(tp, ENABLE_ASF)) {
12964 tg3_readphy(tp, MII_BMSR, &bmsr);
12965 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12966 (bmsr & BMSR_LSTATUS))
12967 goto skip_phy_reset;
12969 err = tg3_phy_reset(tp);
12973 tg3_phy_set_wirespeed(tp);
12975 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12976 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12977 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12978 if (!tg3_copper_is_advertising_all(tp, mask)) {
12979 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
12980 tp->link_config.flowctrl);
12982 tg3_writephy(tp, MII_BMCR,
12983 BMCR_ANENABLE | BMCR_ANRESTART);
12988 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12989 err = tg3_init_5401phy_dsp(tp);
12993 err = tg3_init_5401phy_dsp(tp);
12999 static void __devinit tg3_read_vpd(struct tg3 *tp)
13002 unsigned int block_end, rosize, len;
13005 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13009 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13010 PCI_VPD_LRDT_RO_DATA);
13012 goto out_not_found;
13014 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13015 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13016 i += PCI_VPD_LRDT_TAG_SIZE;
13018 if (block_end > TG3_NVM_VPD_LEN)
13019 goto out_not_found;
13021 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13022 PCI_VPD_RO_KEYWORD_MFR_ID);
13024 len = pci_vpd_info_field_size(&vpd_data[j]);
13026 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13027 if (j + len > block_end || len != 4 ||
13028 memcmp(&vpd_data[j], "1028", 4))
13031 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13032 PCI_VPD_RO_KEYWORD_VENDOR0);
13036 len = pci_vpd_info_field_size(&vpd_data[j]);
13038 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13039 if (j + len > block_end)
13042 memcpy(tp->fw_ver, &vpd_data[j], len);
13043 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13047 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13048 PCI_VPD_RO_KEYWORD_PARTNO);
13050 goto out_not_found;
13052 len = pci_vpd_info_field_size(&vpd_data[i]);
13054 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13055 if (len > TG3_BPN_SIZE ||
13056 (len + i) > TG3_NVM_VPD_LEN)
13057 goto out_not_found;
13059 memcpy(tp->board_part_number, &vpd_data[i], len);
13063 if (tp->board_part_number[0])
13067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13068 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13069 strcpy(tp->board_part_number, "BCM5717");
13070 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13071 strcpy(tp->board_part_number, "BCM5718");
13074 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13075 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13076 strcpy(tp->board_part_number, "BCM57780");
13077 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13078 strcpy(tp->board_part_number, "BCM57760");
13079 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13080 strcpy(tp->board_part_number, "BCM57790");
13081 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13082 strcpy(tp->board_part_number, "BCM57788");
13085 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13086 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13087 strcpy(tp->board_part_number, "BCM57761");
13088 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13089 strcpy(tp->board_part_number, "BCM57765");
13090 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13091 strcpy(tp->board_part_number, "BCM57781");
13092 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13093 strcpy(tp->board_part_number, "BCM57785");
13094 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13095 strcpy(tp->board_part_number, "BCM57791");
13096 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13097 strcpy(tp->board_part_number, "BCM57795");
13100 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13101 strcpy(tp->board_part_number, "BCM95906");
13104 strcpy(tp->board_part_number, "none");
13108 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13112 if (tg3_nvram_read(tp, offset, &val) ||
13113 (val & 0xfc000000) != 0x0c000000 ||
13114 tg3_nvram_read(tp, offset + 4, &val) ||
13121 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13123 u32 val, offset, start, ver_offset;
13125 bool newver = false;
13127 if (tg3_nvram_read(tp, 0xc, &offset) ||
13128 tg3_nvram_read(tp, 0x4, &start))
13131 offset = tg3_nvram_logical_addr(tp, offset);
13133 if (tg3_nvram_read(tp, offset, &val))
13136 if ((val & 0xfc000000) == 0x0c000000) {
13137 if (tg3_nvram_read(tp, offset + 4, &val))
13144 dst_off = strlen(tp->fw_ver);
13147 if (TG3_VER_SIZE - dst_off < 16 ||
13148 tg3_nvram_read(tp, offset + 8, &ver_offset))
13151 offset = offset + ver_offset - start;
13152 for (i = 0; i < 16; i += 4) {
13154 if (tg3_nvram_read_be32(tp, offset + i, &v))
13157 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13162 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13165 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13166 TG3_NVM_BCVER_MAJSFT;
13167 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13168 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13169 "v%d.%02d", major, minor);
13173 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13175 u32 val, major, minor;
13177 /* Use native endian representation */
13178 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13181 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13182 TG3_NVM_HWSB_CFG1_MAJSFT;
13183 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13184 TG3_NVM_HWSB_CFG1_MINSFT;
13186 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13189 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13191 u32 offset, major, minor, build;
13193 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13195 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13198 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13199 case TG3_EEPROM_SB_REVISION_0:
13200 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13202 case TG3_EEPROM_SB_REVISION_2:
13203 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13205 case TG3_EEPROM_SB_REVISION_3:
13206 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13208 case TG3_EEPROM_SB_REVISION_4:
13209 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13211 case TG3_EEPROM_SB_REVISION_5:
13212 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13214 case TG3_EEPROM_SB_REVISION_6:
13215 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13221 if (tg3_nvram_read(tp, offset, &val))
13224 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13225 TG3_EEPROM_SB_EDH_BLD_SHFT;
13226 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13227 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13228 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13230 if (minor > 99 || build > 26)
13233 offset = strlen(tp->fw_ver);
13234 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13235 " v%d.%02d", major, minor);
13238 offset = strlen(tp->fw_ver);
13239 if (offset < TG3_VER_SIZE - 1)
13240 tp->fw_ver[offset] = 'a' + build - 1;
13244 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13246 u32 val, offset, start;
13249 for (offset = TG3_NVM_DIR_START;
13250 offset < TG3_NVM_DIR_END;
13251 offset += TG3_NVM_DIRENT_SIZE) {
13252 if (tg3_nvram_read(tp, offset, &val))
13255 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13259 if (offset == TG3_NVM_DIR_END)
13262 if (!tg3_flag(tp, 5705_PLUS))
13263 start = 0x08000000;
13264 else if (tg3_nvram_read(tp, offset - 4, &start))
13267 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13268 !tg3_fw_img_is_valid(tp, offset) ||
13269 tg3_nvram_read(tp, offset + 8, &val))
13272 offset += val - start;
13274 vlen = strlen(tp->fw_ver);
13276 tp->fw_ver[vlen++] = ',';
13277 tp->fw_ver[vlen++] = ' ';
13279 for (i = 0; i < 4; i++) {
13281 if (tg3_nvram_read_be32(tp, offset, &v))
13284 offset += sizeof(v);
13286 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13287 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13291 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13296 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13302 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13305 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13306 if (apedata != APE_SEG_SIG_MAGIC)
13309 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13310 if (!(apedata & APE_FW_STATUS_READY))
13313 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13315 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13316 tg3_flag_set(tp, APE_HAS_NCSI);
13322 vlen = strlen(tp->fw_ver);
13324 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13326 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13327 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13328 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13329 (apedata & APE_FW_VERSION_BLDMSK));
13332 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13335 bool vpd_vers = false;
13337 if (tp->fw_ver[0] != 0)
13340 if (tg3_flag(tp, NO_NVRAM)) {
13341 strcat(tp->fw_ver, "sb");
13345 if (tg3_nvram_read(tp, 0, &val))
13348 if (val == TG3_EEPROM_MAGIC)
13349 tg3_read_bc_ver(tp);
13350 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13351 tg3_read_sb_ver(tp, val);
13352 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13353 tg3_read_hwsb_ver(tp);
13357 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13360 tg3_read_mgmtfw_ver(tp);
13363 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13366 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13368 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13370 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13371 return TG3_RX_RET_MAX_SIZE_5717;
13372 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13373 return TG3_RX_RET_MAX_SIZE_5700;
13375 return TG3_RX_RET_MAX_SIZE_5705;
13378 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13379 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13380 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13381 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13385 static int __devinit tg3_get_invariants(struct tg3 *tp)
13388 u32 pci_state_reg, grc_misc_cfg;
13393 /* Force memory write invalidate off. If we leave it on,
13394 * then on 5700_BX chips we have to enable a workaround.
13395 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13396 * to match the cacheline size. The Broadcom driver has this
13397 * workaround but turns MWI off all the time and so never uses
13398 * it. This seems to suggest that the workaround is insufficient.
13400 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13401 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13402 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13404 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13405 * has the register indirect write enable bit set before
13406 * we try to access any of the MMIO registers. It is also
13407 * critical that the PCI-X hw workaround situation is decided
13408 * before that as well.
13410 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13413 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13414 MISC_HOST_CTRL_CHIPREV_SHIFT);
13415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13416 u32 prod_id_asic_rev;
13418 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13419 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13420 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13421 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13422 pci_read_config_dword(tp->pdev,
13423 TG3PCI_GEN2_PRODID_ASICREV,
13424 &prod_id_asic_rev);
13425 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13426 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13427 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13428 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13429 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13430 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13431 pci_read_config_dword(tp->pdev,
13432 TG3PCI_GEN15_PRODID_ASICREV,
13433 &prod_id_asic_rev);
13435 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13436 &prod_id_asic_rev);
13438 tp->pci_chip_rev_id = prod_id_asic_rev;
13441 /* Wrong chip ID in 5752 A0. This code can be removed later
13442 * as A0 is not in production.
13444 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13445 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13447 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13448 * we need to disable memory and use config. cycles
13449 * only to access all registers. The 5702/03 chips
13450 * can mistakenly decode the special cycles from the
13451 * ICH chipsets as memory write cycles, causing corruption
13452 * of register and memory space. Only certain ICH bridges
13453 * will drive special cycles with non-zero data during the
13454 * address phase which can fall within the 5703's address
13455 * range. This is not an ICH bug as the PCI spec allows
13456 * non-zero address during special cycles. However, only
13457 * these ICH bridges are known to drive non-zero addresses
13458 * during special cycles.
13460 * Since special cycles do not cross PCI bridges, we only
13461 * enable this workaround if the 5703 is on the secondary
13462 * bus of these ICH bridges.
13464 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13465 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13466 static struct tg3_dev_id {
13470 } ich_chipsets[] = {
13471 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13473 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13475 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13477 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13481 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13482 struct pci_dev *bridge = NULL;
13484 while (pci_id->vendor != 0) {
13485 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13491 if (pci_id->rev != PCI_ANY_ID) {
13492 if (bridge->revision > pci_id->rev)
13495 if (bridge->subordinate &&
13496 (bridge->subordinate->number ==
13497 tp->pdev->bus->number)) {
13498 tg3_flag_set(tp, ICH_WORKAROUND);
13499 pci_dev_put(bridge);
13505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13506 static struct tg3_dev_id {
13509 } bridge_chipsets[] = {
13510 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13511 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13514 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13515 struct pci_dev *bridge = NULL;
13517 while (pci_id->vendor != 0) {
13518 bridge = pci_get_device(pci_id->vendor,
13525 if (bridge->subordinate &&
13526 (bridge->subordinate->number <=
13527 tp->pdev->bus->number) &&
13528 (bridge->subordinate->subordinate >=
13529 tp->pdev->bus->number)) {
13530 tg3_flag_set(tp, 5701_DMA_BUG);
13531 pci_dev_put(bridge);
13537 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13538 * DMA addresses > 40-bit. This bridge may have additional
13539 * 57xx devices behind it in some 4-port NIC designs, for example.
13540 * Any tg3 device found behind the bridge will also need the 40-bit
13543 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13544 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13545 tg3_flag_set(tp, 5780_CLASS);
13546 tg3_flag_set(tp, 40BIT_DMA_BUG);
13547 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13549 struct pci_dev *bridge = NULL;
13552 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13553 PCI_DEVICE_ID_SERVERWORKS_EPB,
13555 if (bridge && bridge->subordinate &&
13556 (bridge->subordinate->number <=
13557 tp->pdev->bus->number) &&
13558 (bridge->subordinate->subordinate >=
13559 tp->pdev->bus->number)) {
13560 tg3_flag_set(tp, 40BIT_DMA_BUG);
13561 pci_dev_put(bridge);
13567 /* Initialize misc host control in PCI block. */
13568 tp->misc_host_ctrl |= (misc_ctrl_reg &
13569 MISC_HOST_CTRL_CHIPREV);
13570 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13571 tp->misc_host_ctrl);
13573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13575 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13577 tp->pdev_peer = tg3_find_peer(tp);
13579 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13581 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13582 tg3_flag_set(tp, 5717_PLUS);
13584 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13585 tg3_flag(tp, 5717_PLUS))
13586 tg3_flag_set(tp, 57765_PLUS);
13588 /* Intentionally exclude ASIC_REV_5906 */
13589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13595 tg3_flag(tp, 57765_PLUS))
13596 tg3_flag_set(tp, 5755_PLUS);
13598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13600 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13601 tg3_flag(tp, 5755_PLUS) ||
13602 tg3_flag(tp, 5780_CLASS))
13603 tg3_flag_set(tp, 5750_PLUS);
13605 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13606 tg3_flag(tp, 5750_PLUS))
13607 tg3_flag_set(tp, 5705_PLUS);
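/* The *_PLUS flags set above are cumulative: a 57765-class chip also has
* 5755_PLUS, 5750_PLUS and 5705_PLUS set, so later code can test the
* oldest family that introduced the feature it cares about, e.g.
* tg3_flag(tp, 5750_PLUS) is true for the 5750 and everything newer.
*/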
13609 /* Determine TSO capabilities */
13610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13611 ; /* Do nothing. HW bug. */
13612 else if (tg3_flag(tp, 57765_PLUS))
13613 tg3_flag_set(tp, HW_TSO_3);
13614 else if (tg3_flag(tp, 5755_PLUS) ||
13615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13616 tg3_flag_set(tp, HW_TSO_2);
13617 else if (tg3_flag(tp, 5750_PLUS)) {
13618 tg3_flag_set(tp, HW_TSO_1);
13619 tg3_flag_set(tp, TSO_BUG);
13620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13621 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13622 tg3_flag_clear(tp, TSO_BUG);
13623 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13624 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13625 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13626 tg3_flag_set(tp, TSO_BUG);
13627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13628 tp->fw_needed = FIRMWARE_TG3TSO5;
13630 tp->fw_needed = FIRMWARE_TG3TSO;
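/* The TSO ladder above, newest match first:
* 5719 - no TSO (hardware bug);
* 57765_PLUS - HW_TSO_3;
* 5755_PLUS or 5906 - HW_TSO_2;
* 5750_PLUS - HW_TSO_1, with TSO_BUG until 5750 rev C2;
* remaining chips - firmware TSO (TG3TSO5 on 5705, else TG3TSO) plus
* TSO_BUG, except 5700, 5701 and 5705 A0, which get no TSO at all.
*/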
13633 /* Selectively allow TSO based on operating conditions */
13634 if (tg3_flag(tp, HW_TSO_1) ||
13635 tg3_flag(tp, HW_TSO_2) ||
13636 tg3_flag(tp, HW_TSO_3) ||
13637 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13638 tg3_flag_set(tp, TSO_CAPABLE);
13640 tg3_flag_clear(tp, TSO_CAPABLE);
13641 tg3_flag_clear(tp, TSO_BUG);
13642 tp->fw_needed = NULL;
13645 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13646 tp->fw_needed = FIRMWARE_TG3;
13650 if (tg3_flag(tp, 5750_PLUS)) {
13651 tg3_flag_set(tp, SUPPORT_MSI);
13652 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13653 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13654 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13655 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13656 tp->pdev_peer == tp->pdev))
13657 tg3_flag_clear(tp, SUPPORT_MSI);
13659 if (tg3_flag(tp, 5755_PLUS) ||
13660 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13661 tg3_flag_set(tp, 1SHOT_MSI);
13664 if (tg3_flag(tp, 57765_PLUS)) {
13665 tg3_flag_set(tp, SUPPORT_MSIX);
13666 tp->irq_max = TG3_IRQ_MAX_VECS;
13670 /* All chips can get confused if TX buffers
13671 * straddle the 4GB address boundary.
13672 */
13673 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13675 if (tg3_flag(tp, 5755_PLUS))
13676 tg3_flag_set(tp, SHORT_DMA_BUG);
13678 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13680 if (tg3_flag(tp, 5717_PLUS))
13681 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13683 if (tg3_flag(tp, 57765_PLUS) &&
13684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13685 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13687 if (!tg3_flag(tp, 5705_PLUS) ||
13688 tg3_flag(tp, 5780_CLASS) ||
13689 tg3_flag(tp, USE_JUMBO_BDFLAG))
13690 tg3_flag_set(tp, JUMBO_CAPABLE);
13692 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13695 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13696 if (tp->pcie_cap != 0) {
13699 tg3_flag_set(tp, PCI_EXPRESS);
13701 tp->pcie_readrq = 4096;
13702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13704 tp->pcie_readrq = 2048;
13706 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
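/* pcie_set_readrq() programs the Max_Read_Request_Size field (bits 14:12
* of the PCIe Device Control register); 5719/5720 are capped at 2048 above.
* Roughly what it does under the hood, for illustration only (compiled
* out; assumes tp->pcie_readrq is a power of two between 128 and 4096):
*/
#if 0
	u16 ctl;

	pci_read_config_word(tp->pdev, tp->pcie_cap + PCI_EXP_DEVCTL, &ctl);
	ctl &= ~PCI_EXP_DEVCTL_READRQ;
	ctl |= (ffs(tp->pcie_readrq) - 8) << 12;	/* 2048 -> field value 4 */
	pci_write_config_word(tp->pdev, tp->pcie_cap + PCI_EXP_DEVCTL, ctl);
#endif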
13708 pci_read_config_word(tp->pdev,
13709 tp->pcie_cap + PCI_EXP_LNKCTL,
13711 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13712 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13714 tg3_flag_clear(tp, HW_TSO_2);
13715 tg3_flag_clear(tp, TSO_CAPABLE);
13717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13718 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13719 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13720 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13721 tg3_flag_set(tp, CLKREQ_BUG);
13722 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13723 tg3_flag_set(tp, L1PLLPD_EN);
13725 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13726 tg3_flag_set(tp, PCI_EXPRESS);
13727 } else if (!tg3_flag(tp, 5705_PLUS) ||
13728 tg3_flag(tp, 5780_CLASS)) {
13729 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13730 if (!tp->pcix_cap) {
13731 dev_err(&tp->pdev->dev,
13732 "Cannot find PCI-X capability, aborting\n");
13736 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13737 tg3_flag_set(tp, PCIX_MODE);
13740 /* If we have an AMD 762 or VIA K8T800 chipset, write
13741 * reordering to the mailbox registers done by the host
13742 * controller can cause major troubles. We read back from
13743 * every mailbox register write to force the writes to be
13744 * posted to the chip in order.
13745 */
13746 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13747 !tg3_flag(tp, PCI_EXPRESS))
13748 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13750 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13751 &tp->pci_cacheline_sz);
13752 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13753 &tp->pci_lat_timer);
13754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13755 tp->pci_lat_timer < 64) {
13756 tp->pci_lat_timer = 64;
13757 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13758 tp->pci_lat_timer);
13761 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13762 /* 5700 BX chips need to have their TX producer index
13763 * mailboxes written twice to work around a bug.
13764 */
13765 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13767 /* If we are in PCI-X mode, enable register write workaround.
13769 * The workaround is to use indirect register accesses
13770 * for all chip writes not to mailbox registers.
13771 */
13772 if (tg3_flag(tp, PCIX_MODE)) {
13775 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13777 /* The chip can have its power management PCI config
13778 * space registers clobbered due to this bug.
13779 * So explicitly force the chip into D0 here.
13780 */
13781 pci_read_config_dword(tp->pdev,
13782 tp->pm_cap + PCI_PM_CTRL,
13784 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13785 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13786 pci_write_config_dword(tp->pdev,
13787 tp->pm_cap + PCI_PM_CTRL,
13790 /* Also, force SERR#/PERR# in PCI command. */
13791 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13792 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13793 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13797 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13798 tg3_flag_set(tp, PCI_HIGH_SPEED);
13799 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13800 tg3_flag_set(tp, PCI_32BIT);
13802 /* Chip-specific fixup from Broadcom driver */
13803 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13804 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13805 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13806 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13809 /* Default fast path register access methods */
13810 tp->read32 = tg3_read32;
13811 tp->write32 = tg3_write32;
13812 tp->read32_mbox = tg3_read32;
13813 tp->write32_mbox = tg3_write32;
13814 tp->write32_tx_mbox = tg3_write32;
13815 tp->write32_rx_mbox = tg3_write32;
13817 /* Various workaround register access methods */
13818 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13819 tp->write32 = tg3_write_indirect_reg32;
13820 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13821 (tg3_flag(tp, PCI_EXPRESS) &&
13822 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13823 /*
13824 * Back to back register writes can cause problems on these
13825 * chips; the workaround is to read back all reg writes
13826 * except those to mailbox regs.
13827 *
13828 * See tg3_write_indirect_reg32().
13829 */
13830 tp->write32 = tg3_write_flush_reg32;
13833 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13834 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13835 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13836 tp->write32_rx_mbox = tg3_write_flush_reg32;
13839 if (tg3_flag(tp, ICH_WORKAROUND)) {
13840 tp->read32 = tg3_read_indirect_reg32;
13841 tp->write32 = tg3_write_indirect_reg32;
13842 tp->read32_mbox = tg3_read_indirect_mbox;
13843 tp->write32_mbox = tg3_write_indirect_mbox;
13844 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13845 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13850 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13851 pci_cmd &= ~PCI_COMMAND_MEMORY;
13852 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13855 tp->read32_mbox = tg3_read32_mbox_5906;
13856 tp->write32_mbox = tg3_write32_mbox_5906;
13857 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13858 tp->write32_rx_mbox = tg3_write32_mbox_5906;
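/* After the selections above, tp->read32/tp->write32 and the mailbox
* variants form a small per-chip vtable: accessors in the tr32()/tw32()
* family dispatch through these pointers, so each errata costs one
* indirect call instead of flag checks on every register access.
*/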
13861 if (tp->write32 == tg3_write_indirect_reg32 ||
13862 (tg3_flag(tp, PCIX_MODE) &&
13863 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13864 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13865 tg3_flag_set(tp, SRAM_USE_CONFIG);
13867 /* Get eeprom hw config before calling tg3_set_power_state().
13868 * In particular, the TG3_FLAG_IS_NIC flag must be
13869 * determined before calling tg3_set_power_state() so that
13870 * we know whether or not to switch out of Vaux power.
13871 * When the flag is set, it means that GPIO1 is used for eeprom
13872 * write protect and also implies that it is a LOM where GPIOs
13873 * are not used to switch power.
13874 */
13875 tg3_get_eeprom_hw_cfg(tp);
13877 if (tg3_flag(tp, ENABLE_APE)) {
13878 /* Allow reads and writes to the
13879 * APE register and memory space.
13880 */
13881 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13882 PCISTATE_ALLOW_APE_SHMEM_WR |
13883 PCISTATE_ALLOW_APE_PSPACE_WR;
13884 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13892 tg3_flag(tp, 57765_PLUS))
13893 tg3_flag_set(tp, CPMU_PRESENT);
13895 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13896 * GPIO1 driven high will bring 5700's external PHY out of reset.
13897 * It is also used as eeprom write protect on LOMs.
13898 */
13899 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13901 tg3_flag(tp, EEPROM_WRITE_PROT))
13902 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13903 GRC_LCLCTRL_GPIO_OUTPUT1);
13904 /* Unused GPIO3 must be driven as output on 5752 because there
13905 * are no pull-up resistors on unused GPIO pins.
13906 */
13907 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13908 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13911 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13912 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13913 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13915 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13916 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13917 /* Turn off the debug UART. */
13918 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13919 if (tg3_flag(tp, IS_NIC))
13920 /* Keep VMain power. */
13921 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13922 GRC_LCLCTRL_GPIO_OUTPUT0;
13925 /* Force the chip into D0. */
13926 err = tg3_power_up(tp);
13928 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13932 /* Derive initial jumbo mode from MTU assigned in
13933 * ether_setup() via the alloc_etherdev() call
13934 */
13935 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13936 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13938 /* Determine WakeOnLan speed to use. */
13939 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13940 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13941 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13942 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13943 tg3_flag_clear(tp, WOL_SPEED_100MB);
13945 tg3_flag_set(tp, WOL_SPEED_100MB);
13948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13949 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13951 /* A few boards don't want Ethernet@WireSpeed phy feature */
13952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13953 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13954 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13955 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13956 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13957 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13958 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13960 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13961 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13962 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13963 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13964 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13966 if (tg3_flag(tp, 5705_PLUS) &&
13967 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13968 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13969 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13970 !tg3_flag(tp, 57765_PLUS)) {
13971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13975 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13976 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13977 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13978 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13979 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13981 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13984 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13985 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13986 tp->phy_otp = tg3_read_otp_phycfg(tp);
13987 if (tp->phy_otp == 0)
13988 tp->phy_otp = TG3_OTP_DEFAULT;
13991 if (tg3_flag(tp, CPMU_PRESENT))
13992 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13994 tp->mi_mode = MAC_MI_MODE_BASE;
13996 tp->coalesce_mode = 0;
13997 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13998 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13999 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14001 /* Set these bits to enable statistics workaround. */
14002 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14003 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14004 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14005 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14006 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14009 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14011 tg3_flag_set(tp, USE_PHYLIB);
14013 err = tg3_mdio_init(tp);
14017 /* Initialize data/descriptor byte/word swapping. */
14018 val = tr32(GRC_MODE);
14019 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14020 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14021 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14022 GRC_MODE_B2HRX_ENABLE |
14023 GRC_MODE_HTX2B_ENABLE |
14024 GRC_MODE_HOST_STACKUP);
14026 val &= GRC_MODE_HOST_STACKUP;
14028 tw32(GRC_MODE, val | tp->grc_mode);
14030 tg3_switch_clocks(tp);
14032 /* Clear this out for sanity. */
14033 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14035 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14037 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14038 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14039 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14041 if (chiprevid == CHIPREV_ID_5701_A0 ||
14042 chiprevid == CHIPREV_ID_5701_B0 ||
14043 chiprevid == CHIPREV_ID_5701_B2 ||
14044 chiprevid == CHIPREV_ID_5701_B5) {
14045 void __iomem *sram_base;
14047 /* Write some dummy words into the SRAM status block
14048 * area and see if it reads back correctly. If the return
14049 * value is bad, force enable the PCIX workaround.
14050 */
14051 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14053 writel(0x00000000, sram_base);
14054 writel(0x00000000, sram_base + 4);
14055 writel(0xffffffff, sram_base + 4);
14056 if (readl(sram_base) != 0x00000000)
14057 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14062 tg3_nvram_init(tp);
14064 grc_misc_cfg = tr32(GRC_MISC_CFG);
14065 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14068 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14069 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14070 tg3_flag_set(tp, IS_5788);
14072 if (!tg3_flag(tp, IS_5788) &&
14073 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14074 tg3_flag_set(tp, TAGGED_STATUS);
14075 if (tg3_flag(tp, TAGGED_STATUS)) {
14076 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14077 HOSTCC_MODE_CLRTICK_TXBD);
14079 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14080 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14081 tp->misc_host_ctrl);
14084 /* Preserve the APE MAC_MODE bits */
14085 if (tg3_flag(tp, ENABLE_APE))
14086 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14088 tp->mac_mode = TG3_DEF_MAC_MODE;
14090 /* these are limited to 10/100 only */
14091 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14092 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14093 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14094 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14095 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14096 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14097 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14098 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14099 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14100 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14101 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14103 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14104 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14105 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14106 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14108 err = tg3_phy_probe(tp);
14110 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14111 /* ... but do not return immediately ... */
14116 tg3_read_fw_ver(tp);
14118 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14119 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14121 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14122 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14124 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14127 /* 5700 {AX,BX} chips have a broken status block link
14128 * change bit implementation, so we must use the
14129 * status register in those cases.
14130 */
14131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14132 tg3_flag_set(tp, USE_LINKCHG_REG);
14134 tg3_flag_clear(tp, USE_LINKCHG_REG);
14136 /* The led_ctrl is set during tg3_phy_probe; here we might
14137 * have to force the link status polling mechanism based
14138 * upon subsystem IDs.
14139 */
14140 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14141 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14142 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14143 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14144 tg3_flag_set(tp, USE_LINKCHG_REG);
14147 /* For all SERDES we poll the MAC status register. */
14148 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14149 tg3_flag_set(tp, POLL_SERDES);
14151 tg3_flag_clear(tp, POLL_SERDES);
14153 tp->rx_offset = NET_IP_ALIGN;
14154 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14156 tg3_flag(tp, PCIX_MODE)) {
14158 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14159 tp->rx_copy_thresh = ~(u16)0;
14163 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14164 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14165 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14167 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14169 /* Increment the rx prod index on the rx std ring by at most
14170 * 8 for these chips to work around hw errata.
14171 */
14172 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14175 tp->rx_std_max_post = 8;
14177 if (tg3_flag(tp, ASPM_WORKAROUND))
14178 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14179 PCIE_PWR_MGMT_L1_THRESH_MSK;
14184 #ifdef CONFIG_SPARC
14185 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14187 struct net_device *dev = tp->dev;
14188 struct pci_dev *pdev = tp->pdev;
14189 struct device_node *dp = pci_device_to_OF_node(pdev);
14190 const unsigned char *addr;
14193 addr = of_get_property(dp, "local-mac-address", &len);
14194 if (addr && len == 6) {
14195 memcpy(dev->dev_addr, addr, 6);
14196 memcpy(dev->perm_addr, dev->dev_addr, 6);
14202 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14204 struct net_device *dev = tp->dev;
14206 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14207 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14212 static int __devinit tg3_get_device_address(struct tg3 *tp)
14214 struct net_device *dev = tp->dev;
14215 u32 hi, lo, mac_offset;
14218 #ifdef CONFIG_SPARC
14219 if (!tg3_get_macaddr_sparc(tp))
14224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14225 tg3_flag(tp, 5780_CLASS)) {
14226 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14228 if (tg3_nvram_lock(tp))
14229 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14231 tg3_nvram_unlock(tp);
14232 } else if (tg3_flag(tp, 5717_PLUS)) {
14233 if (PCI_FUNC(tp->pdev->devfn) & 1)
14235 if (PCI_FUNC(tp->pdev->devfn) > 1)
14236 mac_offset += 0x18c;
14237 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14240 /* First try to get it from MAC address mailbox. */
14241 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14242 if ((hi >> 16) == 0x484b) {
14243 dev->dev_addr[0] = (hi >> 8) & 0xff;
14244 dev->dev_addr[1] = (hi >> 0) & 0xff;
14246 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14247 dev->dev_addr[2] = (lo >> 24) & 0xff;
14248 dev->dev_addr[3] = (lo >> 16) & 0xff;
14249 dev->dev_addr[4] = (lo >> 8) & 0xff;
14250 dev->dev_addr[5] = (lo >> 0) & 0xff;
14252 /* Some old bootcode may report a 0 MAC address in SRAM */
14253 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14256 /* Next, try NVRAM. */
14257 if (!tg3_flag(tp, NO_NVRAM) &&
14258 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14259 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14260 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14261 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14263 /* Finally just fetch it out of the MAC control regs. */
14265 hi = tr32(MAC_ADDR_0_HIGH);
14266 lo = tr32(MAC_ADDR_0_LOW);
14268 dev->dev_addr[5] = lo & 0xff;
14269 dev->dev_addr[4] = (lo >> 8) & 0xff;
14270 dev->dev_addr[3] = (lo >> 16) & 0xff;
14271 dev->dev_addr[2] = (lo >> 24) & 0xff;
14272 dev->dev_addr[1] = hi & 0xff;
14273 dev->dev_addr[0] = (hi >> 8) & 0xff;
14277 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14278 #ifdef CONFIG_SPARC
14279 if (!tg3_get_default_macaddr_sparc(tp))
14284 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
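/* Worked example of the unpacking above (illustrative values only):
* hi = 0x484b0010 passes the (hi >> 16) == 0x484b ("HK") signature check,
* and together with lo = 0x18fd1234 yields dev_addr 00:10:18:fd:12:34.
*/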
14288 #define BOUNDARY_SINGLE_CACHELINE 1
14289 #define BOUNDARY_MULTI_CACHELINE 2
14291 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14293 int cacheline_size;
14297 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14299 cacheline_size = 1024;
14301 cacheline_size = (int) byte * 4;
14303 /* On 5703 and later chips, the boundary bits have no
14304 * effect.
14305 */
14306 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14307 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14308 !tg3_flag(tp, PCI_EXPRESS))
14311 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14312 goal = BOUNDARY_MULTI_CACHELINE;
14314 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14315 goal = BOUNDARY_SINGLE_CACHELINE;
14321 if (tg3_flag(tp, 57765_PLUS)) {
14322 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14329 /* PCI controllers on most RISC systems tend to disconnect
14330 * when a device tries to burst across a cache-line boundary.
14331 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14333 * Unfortunately, for PCI-E there are only limited
14334 * write-side controls for this, and thus for reads
14335 * we will still get the disconnects. We'll also waste
14336 * these PCI cycles for both read and write for chips
14337 * other than 5700 and 5701 which do not implement the
14338 * boundary bits.
14339 */
14340 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14341 switch (cacheline_size) {
14346 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14347 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14348 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14350 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14351 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14356 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14357 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14361 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14362 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14365 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14366 switch (cacheline_size) {
14370 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14371 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14372 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14378 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14379 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14383 switch (cacheline_size) {
14385 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14386 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14387 DMA_RWCTRL_WRITE_BNDRY_16);
14392 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14393 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14394 DMA_RWCTRL_WRITE_BNDRY_32);
14399 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14400 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14401 DMA_RWCTRL_WRITE_BNDRY_64);
14406 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14407 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14408 DMA_RWCTRL_WRITE_BNDRY_128);
14413 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14414 DMA_RWCTRL_WRITE_BNDRY_256);
14417 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14418 DMA_RWCTRL_WRITE_BNDRY_512);
14422 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14423 DMA_RWCTRL_WRITE_BNDRY_1024);
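/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the "* 4" when
* converting to bytes above, and a raw value of 0 ("not configured") is
* pessimistically treated as 1024. For example (illustrative value):
*/
#if 0
	u8 byte = 0x10;					/* 16 dwords */
	int cacheline_size = byte ? byte * 4 : 1024;	/* -> 64 bytes */
#endif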
14432 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14434 struct tg3_internal_buffer_desc test_desc;
14435 u32 sram_dma_descs;
14438 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14440 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14441 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14442 tw32(RDMAC_STATUS, 0);
14443 tw32(WDMAC_STATUS, 0);
14445 tw32(BUFMGR_MODE, 0);
14446 tw32(FTQ_RESET, 0);
14448 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14449 test_desc.addr_lo = buf_dma & 0xffffffff;
14450 test_desc.nic_mbuf = 0x00002100;
14451 test_desc.len = size;
14453 /*
14454 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14455 * the *second* time the tg3 driver was getting loaded after an
14456 * initial scan.
14457 *
14458 * Broadcom tells me:
14459 * ...the DMA engine is connected to the GRC block and a DMA
14460 * reset may affect the GRC block in some unpredictable way...
14461 * The behavior of resets to individual blocks has not been tested.
14462 *
14463 * Broadcom noted the GRC reset will also reset all sub-components.
14464 */
14466 test_desc.cqid_sqid = (13 << 8) | 2;
14468 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14471 test_desc.cqid_sqid = (16 << 8) | 7;
14473 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14476 test_desc.flags = 0x00000005;
14478 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14481 val = *(((u32 *)&test_desc) + i);
14482 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14483 sram_dma_descs + (i * sizeof(u32)));
14484 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14486 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
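/* The test descriptor is pushed into NIC SRAM one u32 at a time through
* the indirect config-space window used above. The same pattern as a
* small helper (hypothetical, compiled out):
*/
#if 0
static void tg3_sram_window_write(struct tg3 *tp, u32 off, u32 val)
{
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	/* Close the window again so stray accesses don't hit SRAM. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
#endif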
14489 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14491 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14494 for (i = 0; i < 40; i++) {
14498 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14500 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14501 if ((val & 0xffff) == sram_dma_descs) {
14512 #define TEST_BUFFER_SIZE 0x2000
14514 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14515 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14519 static int __devinit tg3_test_dma(struct tg3 *tp)
14521 dma_addr_t buf_dma;
14522 u32 *buf, saved_dma_rwctrl;
14525 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14526 &buf_dma, GFP_KERNEL);
14532 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14533 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14535 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14537 if (tg3_flag(tp, 57765_PLUS))
14540 if (tg3_flag(tp, PCI_EXPRESS)) {
14541 /* DMA read watermark not used on PCIE */
14542 tp->dma_rwctrl |= 0x00180000;
14543 } else if (!tg3_flag(tp, PCIX_MODE)) {
14544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14546 tp->dma_rwctrl |= 0x003f0000;
14548 tp->dma_rwctrl |= 0x003f000f;
14550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14552 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14553 u32 read_water = 0x7;
14555 /* If the 5704 is behind the EPB bridge, we can
14556 * do the less restrictive ONE_DMA workaround for
14557 * better performance.
14558 */
14559 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14560 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14561 tp->dma_rwctrl |= 0x8000;
14562 else if (ccval == 0x6 || ccval == 0x7)
14563 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14565 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14567 /* Set bit 23 to enable PCIX hw bug fix */
14569 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14570 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14572 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14573 /* 5780 always in PCIX mode */
14574 tp->dma_rwctrl |= 0x00144000;
14575 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14576 /* 5714 always in PCIX mode */
14577 tp->dma_rwctrl |= 0x00148000;
14579 tp->dma_rwctrl |= 0x001b000f;
14583 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14584 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14585 tp->dma_rwctrl &= 0xfffffff0;
14587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14589 /* Remove this if it causes problems for some boards. */
14590 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14592 /* On 5700/5701 chips, we need to set this bit.
14593 * Otherwise the chip will issue cacheline transactions
14594 * to streamable DMA memory with not all the byte
14595 * enables turned on. This is an error on several
14596 * RISC PCI controllers, in particular sparc64.
14598 * On 5703/5704 chips, this bit has been reassigned
14599 * a different meaning. In particular, it is used
14600 * on those chips to enable a PCI-X workaround.
14601 */
14602 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14605 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14608 /* Unneeded, already done by tg3_get_invariants. */
14609 tg3_switch_clocks(tp);
14612 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14613 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14616 /* It is best to perform DMA test with maximum write burst size
14617 * to expose the 5700/5701 write DMA bug.
14618 */
14619 saved_dma_rwctrl = tp->dma_rwctrl;
14620 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14621 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14626 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14629 /* Send the buffer to the chip. */
14630 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14632 dev_err(&tp->pdev->dev,
14633 "%s: Buffer write failed. err = %d\n",
14639 /* validate data reached card RAM correctly. */
14640 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14642 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14643 if (le32_to_cpu(val) != p[i]) {
14644 dev_err(&tp->pdev->dev,
14645 "%s: Buffer corrupted on device! "
14646 "(%d != %d)\n", __func__, val, i);
14647 /* ret = -ENODEV here? */
14652 /* Now read it back. */
14653 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14655 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14656 "err = %d\n", __func__, ret);
14661 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14665 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14666 DMA_RWCTRL_WRITE_BNDRY_16) {
14667 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14668 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14669 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14672 dev_err(&tp->pdev->dev,
14673 "%s: Buffer corrupted on read back! "
14674 "(%d != %d)\n", __func__, p[i], i);
14680 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14686 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14687 DMA_RWCTRL_WRITE_BNDRY_16) {
14688 /* DMA test passed without adjusting DMA boundary;
14689 * now look for chipsets that are known to expose the
14690 * DMA bug without failing the test.
14691 */
14692 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14693 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14694 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14696 /* Safe to use the calculated DMA boundary. */
14697 tp->dma_rwctrl = saved_dma_rwctrl;
14700 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14704 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
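/* Net effect of tg3_test_dma(): on a read-back mismatch, or when a host
* bridge from tg3_dma_wait_state_chipsets is present even though the test
* passed, the DMA write boundary is clamped to 16 bytes; otherwise the
* boundary computed by tg3_calc_dma_bndry() is kept.
*/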
14709 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14711 if (tg3_flag(tp, 57765_PLUS)) {
14712 tp->bufmgr_config.mbuf_read_dma_low_water =
14713 DEFAULT_MB_RDMA_LOW_WATER_5705;
14714 tp->bufmgr_config.mbuf_mac_rx_low_water =
14715 DEFAULT_MB_MACRX_LOW_WATER_57765;
14716 tp->bufmgr_config.mbuf_high_water =
14717 DEFAULT_MB_HIGH_WATER_57765;
14719 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14720 DEFAULT_MB_RDMA_LOW_WATER_5705;
14721 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14722 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14723 tp->bufmgr_config.mbuf_high_water_jumbo =
14724 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14725 } else if (tg3_flag(tp, 5705_PLUS)) {
14726 tp->bufmgr_config.mbuf_read_dma_low_water =
14727 DEFAULT_MB_RDMA_LOW_WATER_5705;
14728 tp->bufmgr_config.mbuf_mac_rx_low_water =
14729 DEFAULT_MB_MACRX_LOW_WATER_5705;
14730 tp->bufmgr_config.mbuf_high_water =
14731 DEFAULT_MB_HIGH_WATER_5705;
14732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14733 tp->bufmgr_config.mbuf_mac_rx_low_water =
14734 DEFAULT_MB_MACRX_LOW_WATER_5906;
14735 tp->bufmgr_config.mbuf_high_water =
14736 DEFAULT_MB_HIGH_WATER_5906;
14739 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14740 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14741 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14742 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14743 tp->bufmgr_config.mbuf_high_water_jumbo =
14744 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14746 tp->bufmgr_config.mbuf_read_dma_low_water =
14747 DEFAULT_MB_RDMA_LOW_WATER;
14748 tp->bufmgr_config.mbuf_mac_rx_low_water =
14749 DEFAULT_MB_MACRX_LOW_WATER;
14750 tp->bufmgr_config.mbuf_high_water =
14751 DEFAULT_MB_HIGH_WATER;
14753 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14754 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14755 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14756 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14757 tp->bufmgr_config.mbuf_high_water_jumbo =
14758 DEFAULT_MB_HIGH_WATER_JUMBO;
14761 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14762 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14765 static char * __devinit tg3_phy_string(struct tg3 *tp)
14767 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14768 case TG3_PHY_ID_BCM5400: return "5400";
14769 case TG3_PHY_ID_BCM5401: return "5401";
14770 case TG3_PHY_ID_BCM5411: return "5411";
14771 case TG3_PHY_ID_BCM5701: return "5701";
14772 case TG3_PHY_ID_BCM5703: return "5703";
14773 case TG3_PHY_ID_BCM5704: return "5704";
14774 case TG3_PHY_ID_BCM5705: return "5705";
14775 case TG3_PHY_ID_BCM5750: return "5750";
14776 case TG3_PHY_ID_BCM5752: return "5752";
14777 case TG3_PHY_ID_BCM5714: return "5714";
14778 case TG3_PHY_ID_BCM5780: return "5780";
14779 case TG3_PHY_ID_BCM5755: return "5755";
14780 case TG3_PHY_ID_BCM5787: return "5787";
14781 case TG3_PHY_ID_BCM5784: return "5784";
14782 case TG3_PHY_ID_BCM5756: return "5722/5756";
14783 case TG3_PHY_ID_BCM5906: return "5906";
14784 case TG3_PHY_ID_BCM5761: return "5761";
14785 case TG3_PHY_ID_BCM5718C: return "5718C";
14786 case TG3_PHY_ID_BCM5718S: return "5718S";
14787 case TG3_PHY_ID_BCM57765: return "57765";
14788 case TG3_PHY_ID_BCM5719C: return "5719C";
14789 case TG3_PHY_ID_BCM5720C: return "5720C";
14790 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14791 case 0: return "serdes";
14792 default: return "unknown";
14796 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14798 if (tg3_flag(tp, PCI_EXPRESS)) {
14799 strcpy(str, "PCI Express");
14801 } else if (tg3_flag(tp, PCIX_MODE)) {
14802 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14804 strcpy(str, "PCIX:");
14806 if ((clock_ctrl == 7) ||
14807 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14808 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14809 strcat(str, "133MHz");
14810 else if (clock_ctrl == 0)
14811 strcat(str, "33MHz");
14812 else if (clock_ctrl == 2)
14813 strcat(str, "50MHz");
14814 else if (clock_ctrl == 4)
14815 strcat(str, "66MHz");
14816 else if (clock_ctrl == 6)
14817 strcat(str, "100MHz");
14819 strcpy(str, "PCI:");
14820 if (tg3_flag(tp, PCI_HIGH_SPEED))
14821 strcat(str, "66MHz");
14823 strcat(str, "33MHz");
14825 if (tg3_flag(tp, PCI_32BIT))
14826 strcat(str, ":32-bit");
14828 strcat(str, ":64-bit");
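/* Example strings produced for the caller-supplied buffer, one per bus
* type (illustrative): "PCI Express", "PCIX:133MHz:64-bit",
* "PCI:33MHz:32-bit". The buffer must be sized accordingly by the caller.
*/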
14832 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14834 struct pci_dev *peer;
14835 unsigned int func, devnr = tp->pdev->devfn & ~7;
14837 for (func = 0; func < 8; func++) {
14838 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14839 if (peer && peer != tp->pdev)
14843 /* 5704 can be configured in single-port mode; set peer to
14844 * tp->pdev in that case.
14845 */
14851 /*
14852 * We don't need to keep the refcount elevated; there's no way
14853 * to remove one half of this device without removing the other.
14854 */
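/* devfn packs the device number in bits 7:3 and the function number in
* bits 2:0, so "devfn & ~7" is function 0 of this slot and the loop above
* probes all eight functions. Hypothetical example: devfn 0x21 (device 4,
* function 1) scans 0x20..0x27 and returns the first sibling found.
*/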
14860 static void __devinit tg3_init_coal(struct tg3 *tp)
14862 struct ethtool_coalesce *ec = &tp->coal;
14864 memset(ec, 0, sizeof(*ec));
14865 ec->cmd = ETHTOOL_GCOALESCE;
14866 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14867 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14868 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14869 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14870 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14871 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14872 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14873 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14874 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14876 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14877 HOSTCC_MODE_CLRTICK_TXBD)) {
14878 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14879 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14880 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14881 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14884 if (tg3_flag(tp, 5705_PLUS)) {
14885 ec->rx_coalesce_usecs_irq = 0;
14886 ec->tx_coalesce_usecs_irq = 0;
14887 ec->stats_block_coalesce_usecs = 0;
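/* The values seeded here are what userspace sees through the
* ETHTOOL_GCOALESCE ioctl (e.g. "ethtool -c ethX") until they are changed;
* note that 5705_PLUS chips have their per-IRQ and statistics coalescing
* parameters forced to zero above.
*/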
14891 static const struct net_device_ops tg3_netdev_ops = {
14892 .ndo_open = tg3_open,
14893 .ndo_stop = tg3_close,
14894 .ndo_start_xmit = tg3_start_xmit,
14895 .ndo_get_stats64 = tg3_get_stats64,
14896 .ndo_validate_addr = eth_validate_addr,
14897 .ndo_set_multicast_list = tg3_set_rx_mode,
14898 .ndo_set_mac_address = tg3_set_mac_addr,
14899 .ndo_do_ioctl = tg3_ioctl,
14900 .ndo_tx_timeout = tg3_tx_timeout,
14901 .ndo_change_mtu = tg3_change_mtu,
14902 .ndo_fix_features = tg3_fix_features,
14903 .ndo_set_features = tg3_set_features,
14904 #ifdef CONFIG_NET_POLL_CONTROLLER
14905 .ndo_poll_controller = tg3_poll_controller,
14909 static int __devinit tg3_init_one(struct pci_dev *pdev,
14910 const struct pci_device_id *ent)
14912 struct net_device *dev;
14914 int i, err, pm_cap;
14915 u32 sndmbx, rcvmbx, intmbx;
14917 u64 dma_mask, persist_dma_mask;
14920 printk_once(KERN_INFO "%s\n", version);
14922 err = pci_enable_device(pdev);
14924 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14928 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14930 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14931 goto err_out_disable_pdev;
14934 pci_set_master(pdev);
14936 /* Find power-management capability. */
14937 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14939 dev_err(&pdev->dev,
14940 "Cannot find Power Management capability, aborting\n");
14942 goto err_out_free_res;
14945 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14947 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14949 goto err_out_free_res;
14952 SET_NETDEV_DEV(dev, &pdev->dev);
14954 tp = netdev_priv(dev);
14957 tp->pm_cap = pm_cap;
14958 tp->rx_mode = TG3_DEF_RX_MODE;
14959 tp->tx_mode = TG3_DEF_TX_MODE;
14962 tp->msg_enable = tg3_debug;
14964 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14966 /* The word/byte swap controls here control register access byte
14967 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14968 * setting below.
14969 */
14970 tp->misc_host_ctrl =
14971 MISC_HOST_CTRL_MASK_PCI_INT |
14972 MISC_HOST_CTRL_WORD_SWAP |
14973 MISC_HOST_CTRL_INDIR_ACCESS |
14974 MISC_HOST_CTRL_PCISTATE_RW;
14976 /* The NONFRM (non-frame) byte/word swap controls take effect
14977 * on descriptor entries, anything which isn't packet data.
14979 * The StrongARM chips on the board (one for tx, one for rx)
14980 * are running in big-endian mode.
14981 */
14982 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14983 GRC_MODE_WSWAP_NONFRM_DATA);
14984 #ifdef __BIG_ENDIAN
14985 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14987 spin_lock_init(&tp->lock);
14988 spin_lock_init(&tp->indirect_lock);
14989 INIT_WORK(&tp->reset_task, tg3_reset_task);
14991 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14993 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14995 goto err_out_free_dev;
14998 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14999 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15001 dev->ethtool_ops = &tg3_ethtool_ops;
15002 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15003 dev->netdev_ops = &tg3_netdev_ops;
15004 dev->irq = pdev->irq;
15006 err = tg3_get_invariants(tp);
15008 dev_err(&pdev->dev,
15009 "Problem fetching invariants of chip, aborting\n");
15010 goto err_out_iounmap;
15013 /* The EPB bridge inside 5714, 5715, and 5780 and any
15014 * device behind the EPB cannot support DMA addresses > 40-bit.
15015 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15016 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15017 * do DMA address check in tg3_start_xmit().
15018 */
15019 if (tg3_flag(tp, IS_5788))
15020 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15021 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15022 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15023 #ifdef CONFIG_HIGHMEM
15024 dma_mask = DMA_BIT_MASK(64);
15027 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15029 /* Configure DMA attributes. */
15030 if (dma_mask > DMA_BIT_MASK(32)) {
15031 err = pci_set_dma_mask(pdev, dma_mask);
15033 features |= NETIF_F_HIGHDMA;
15034 err = pci_set_consistent_dma_mask(pdev,
15037 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15038 "DMA for consistent allocations\n");
15039 goto err_out_iounmap;
15043 if (err || dma_mask == DMA_BIT_MASK(32)) {
15044 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15046 dev_err(&pdev->dev,
15047 "No usable DMA configuration, aborting\n");
15048 goto err_out_iounmap;
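/* DMA mask policy above, summarized: IS_5788 parts use 32-bit masks for
* both streaming and coherent DMA; 40BIT_DMA_BUG parts keep a 40-bit
* persistent mask (64-bit streaming is still tried under CONFIG_HIGHMEM);
* everything else gets 64-bit. NETIF_F_HIGHDMA is advertised only when a
* greater-than-32-bit streaming mask was accepted.
*/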
15052 tg3_init_bufmgr_config(tp);
15054 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15056 /* 5700 B0 chips do not support checksumming correctly due
15057 * to hardware bugs.
15058 */
15059 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15060 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15062 if (tg3_flag(tp, 5755_PLUS))
15063 features |= NETIF_F_IPV6_CSUM;
15066 /* TSO is on by default on chips that support hardware TSO.
15067 * Firmware TSO on older chips gives lower performance, so it
15068 * is off by default, but can be enabled using ethtool.
15069 */
15070 if ((tg3_flag(tp, HW_TSO_1) ||
15071 tg3_flag(tp, HW_TSO_2) ||
15072 tg3_flag(tp, HW_TSO_3)) &&
15073 (features & NETIF_F_IP_CSUM))
15074 features |= NETIF_F_TSO;
15075 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15076 if (features & NETIF_F_IPV6_CSUM)
15077 features |= NETIF_F_TSO6;
15078 if (tg3_flag(tp, HW_TSO_3) ||
15079 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15080 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15081 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15084 features |= NETIF_F_TSO_ECN;
15087 dev->features |= features;
15088 dev->vlan_features |= features;
15090 /*
15091 * Add loopback capability only for a subset of devices that support
15092 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15093 * loopback for the remaining devices.
15094 */
15095 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15096 !tg3_flag(tp, CPMU_PRESENT))
15097 /* Add the loopback capability */
15098 features |= NETIF_F_LOOPBACK;
15100 dev->hw_features |= features;
15102 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15103 !tg3_flag(tp, TSO_CAPABLE) &&
15104 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15105 tg3_flag_set(tp, MAX_RXPEND_64);
15106 tp->rx_pending = 63;
15109 err = tg3_get_device_address(tp);
15111 dev_err(&pdev->dev,
15112 "Could not obtain valid ethernet address, aborting\n");
15113 goto err_out_iounmap;
15116 if (tg3_flag(tp, ENABLE_APE)) {
15117 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15118 if (!tp->aperegs) {
15119 dev_err(&pdev->dev,
15120 "Cannot map APE registers, aborting\n");
15122 goto err_out_iounmap;
15125 tg3_ape_lock_init(tp);
15127 if (tg3_flag(tp, ENABLE_ASF))
15128 tg3_read_dash_ver(tp);
15131 /*
15132 * Reset chip in case UNDI or EFI driver did not shut down the device;
15133 * the DMA self test will enable WDMAC and we'll see (spurious)
15134 * pending DMA on the PCI bus at that point.
15135 */
15136 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15137 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15138 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15139 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15142 err = tg3_test_dma(tp);
15144 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15145 goto err_out_apeunmap;
15148 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15149 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15150 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15151 for (i = 0; i < tp->irq_max; i++) {
15152 struct tg3_napi *tnapi = &tp->napi[i];
15155 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15157 tnapi->int_mbox = intmbx;
15163 tnapi->consmbox = rcvmbx;
15164 tnapi->prodmbox = sndmbx;
15167 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15169 tnapi->coal_now = HOSTCC_MODE_NOW;
15171 if (!tg3_flag(tp, SUPPORT_MSIX))
15174 /*
15175 * If we support MSIX, we'll be using RSS. If we're using
15176 * RSS, the first vector only handles link interrupts and the
15177 * remaining vectors handle rx and tx interrupts. Reuse the
15178 * mailbox values for the next iteration. The values we set up
15179 * above are still useful for the single vectored mode.
15180 */
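/* Resulting vector layout under MSI-X/RSS: vector 0 handles link changes
* only, while vectors 1..n carry the rx and tx rings, each tg3_napi slot
* keeping its own interrupt, consumer and producer mailbox addresses
* seeded from the values above.
*/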
15194 pci_set_drvdata(pdev, dev);
15196 err = register_netdev(dev);
15198 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15199 goto err_out_apeunmap;
15202 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15203 tp->board_part_number,
15204 tp->pci_chip_rev_id,
15205 tg3_bus_string(tp, str),
15208 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15209 struct phy_device *phydev;
15210 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15212 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15213 phydev->drv->name, dev_name(&phydev->dev));
15217 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15218 ethtype = "10/100Base-TX";
15219 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15220 ethtype = "1000Base-SX";
15222 ethtype = "10/100/1000Base-T";
15224 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15225 "(WireSpeed[%d], EEE[%d])\n",
15226 tg3_phy_string(tp), ethtype,
15227 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15228 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15231 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15232 (dev->features & NETIF_F_RXCSUM) != 0,
15233 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15234 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15235 tg3_flag(tp, ENABLE_ASF) != 0,
15236 tg3_flag(tp, TSO_CAPABLE) != 0);
15237 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15239 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15240 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15242 pci_save_state(pdev);
15248 iounmap(tp->aperegs);
15249 tp->aperegs = NULL;
15262 pci_release_regions(pdev);
15264 err_out_disable_pdev:
15265 pci_disable_device(pdev);
15266 pci_set_drvdata(pdev, NULL);
15270 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15272 struct net_device *dev = pci_get_drvdata(pdev);
15275 struct tg3 *tp = netdev_priv(dev);
15278 release_firmware(tp->fw);
15280 cancel_work_sync(&tp->reset_task);
15282 if (!tg3_flag(tp, USE_PHYLIB)) {
15287 unregister_netdev(dev);
15289 iounmap(tp->aperegs);
15290 tp->aperegs = NULL;
15297 pci_release_regions(pdev);
15298 pci_disable_device(pdev);
15299 pci_set_drvdata(pdev, NULL);
15303 #ifdef CONFIG_PM_SLEEP
15304 static int tg3_suspend(struct device *device)
15306 struct pci_dev *pdev = to_pci_dev(device);
15307 struct net_device *dev = pci_get_drvdata(pdev);
15308 struct tg3 *tp = netdev_priv(dev);
15311 if (!netif_running(dev))
15314 flush_work_sync(&tp->reset_task);
15316 tg3_netif_stop(tp);
15318 del_timer_sync(&tp->timer);
15320 tg3_full_lock(tp, 1);
15321 tg3_disable_ints(tp);
15322 tg3_full_unlock(tp);
15324 netif_device_detach(dev);
15326 tg3_full_lock(tp, 0);
15327 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15328 tg3_flag_clear(tp, INIT_COMPLETE);
15329 tg3_full_unlock(tp);
15331 err = tg3_power_down_prepare(tp);
15335 tg3_full_lock(tp, 0);
15337 tg3_flag_set(tp, INIT_COMPLETE);
15338 err2 = tg3_restart_hw(tp, 1);
15342 tp->timer.expires = jiffies + tp->timer_offset;
15343 add_timer(&tp->timer);
15345 netif_device_attach(dev);
15346 tg3_netif_start(tp);
15349 tg3_full_unlock(tp);
15358 static int tg3_resume(struct device *device)
15360 struct pci_dev *pdev = to_pci_dev(device);
15361 struct net_device *dev = pci_get_drvdata(pdev);
15362 struct tg3 *tp = netdev_priv(dev);
15365 if (!netif_running(dev))
15368 netif_device_attach(dev);
15370 tg3_full_lock(tp, 0);
15372 tg3_flag_set(tp, INIT_COMPLETE);
15373 err = tg3_restart_hw(tp, 1);
15377 tp->timer.expires = jiffies + tp->timer_offset;
15378 add_timer(&tp->timer);
15380 tg3_netif_start(tp);
15383 tg3_full_unlock(tp);
15391 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15392 #define TG3_PM_OPS (&tg3_pm_ops)
15396 #define TG3_PM_OPS NULL
15398 #endif /* CONFIG_PM_SLEEP */
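/* SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops that routes all system
* sleep transitions (suspend/resume, freeze/thaw, poweroff/restore) to the
* two callbacks above; tg3_driver.driver.pm points at it below and is
* simply NULL when CONFIG_PM_SLEEP is disabled.
*/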
15400 /**
15401 * tg3_io_error_detected - called when PCI error is detected
15402 * @pdev: Pointer to PCI device
15403 * @state: The current pci connection state
15404 *
15405 * This function is called after a PCI bus error affecting
15406 * this device has been detected.
15407 */
15408 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15409 pci_channel_state_t state)
15411 struct net_device *netdev = pci_get_drvdata(pdev);
15412 struct tg3 *tp = netdev_priv(netdev);
15413 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15415 netdev_info(netdev, "PCI I/O error detected\n");
15419 if (!netif_running(netdev))
15424 tg3_netif_stop(tp);
15426 del_timer_sync(&tp->timer);
15427 tg3_flag_clear(tp, RESTART_TIMER);
15429 /* Want to make sure that the reset task doesn't run */
15430 cancel_work_sync(&tp->reset_task);
15431 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15432 tg3_flag_clear(tp, RESTART_TIMER);
15434 netif_device_detach(netdev);
15436 /* Clean up software state, even if MMIO is blocked */
15437 tg3_full_lock(tp, 0);
15438 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15439 tg3_full_unlock(tp);
15442 if (state == pci_channel_io_perm_failure)
15443 err = PCI_ERS_RESULT_DISCONNECT;
15445 pci_disable_device(pdev);
15452 /**
15453 * tg3_io_slot_reset - called after the pci bus has been reset.
15454 * @pdev: Pointer to PCI device
15455 *
15456 * Restart the card from scratch, as if from a cold-boot.
15457 * At this point, the card has experienced a hard reset,
15458 * followed by fixups by BIOS, and has its config space
15459 * set up identically to what it was at cold boot.
15460 */
15461 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15463 struct net_device *netdev = pci_get_drvdata(pdev);
15464 struct tg3 *tp = netdev_priv(netdev);
15465 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15470 if (pci_enable_device(pdev)) {
15471 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15475 pci_set_master(pdev);
15476 pci_restore_state(pdev);
15477 pci_save_state(pdev);
15479 if (!netif_running(netdev)) {
15480 rc = PCI_ERS_RESULT_RECOVERED;
15484 err = tg3_power_up(tp);
15486 netdev_err(netdev, "Failed to restore register access.\n");
15490 rc = PCI_ERS_RESULT_RECOVERED;
15498 /**
15499 * tg3_io_resume - called when traffic can start flowing again.
15500 * @pdev: Pointer to PCI device
15501 *
15502 * This callback is called when the error recovery driver tells
15503 * us that it's OK to resume normal operation.
15504 */
15505 static void tg3_io_resume(struct pci_dev *pdev)
15507 struct net_device *netdev = pci_get_drvdata(pdev);
15508 struct tg3 *tp = netdev_priv(netdev);
15513 if (!netif_running(netdev))
15516 tg3_full_lock(tp, 0);
15517 tg3_flag_set(tp, INIT_COMPLETE);
15518 err = tg3_restart_hw(tp, 1);
15519 tg3_full_unlock(tp);
15521 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15525 netif_device_attach(netdev);
15527 tp->timer.expires = jiffies + tp->timer_offset;
15528 add_timer(&tp->timer);
15530 tg3_netif_start(tp);
15538 static struct pci_error_handlers tg3_err_handler = {
15539 .error_detected = tg3_io_error_detected,
15540 .slot_reset = tg3_io_slot_reset,
15541 .resume = tg3_io_resume
15544 static struct pci_driver tg3_driver = {
15545 .name = DRV_MODULE_NAME,
15546 .id_table = tg3_pci_tbl,
15547 .probe = tg3_init_one,
15548 .remove = __devexit_p(tg3_remove_one),
15549 .err_handler = &tg3_err_handler,
15550 .driver.pm = TG3_PM_OPS,
15553 static int __init tg3_init(void)
15555 return pci_register_driver(&tg3_driver);
15558 static void __exit tg3_cleanup(void)
15560 pci_unregister_driver(&tg3_driver);
15563 module_init(tg3_init);
15564 module_exit(tg3_cleanup);