/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

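/* For example, tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), so callers name a flag
 * without its TG3_FLAG_ prefix and still get a type-checked bit test.
 */
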
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

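/* Example: with TG3_TX_RING_SIZE = 512, NEXT_TX(511) evaluates to
 * (511 + 1) & 511 = 0, so the index wraps with a single AND instead of
 * a hardware modulo operation.
 */
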
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

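/* With the sizes above, TG3_RX_STD_MAP_SZ maps 1536 + 64 = 1600 bytes and
 * TG3_RX_JMB_MAP_SZ maps 9046 + 64 = 9110 bytes per buffer.
 */
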
#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

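/* Example: at the default tx_pending of TG3_TX_RING_SIZE - 1 = 511, the
 * queue is woken once 511 / 4 = 127 descriptors are free.
 */
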
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },

	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

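/* The *_indirect_* accessors below avoid MMIO entirely: the register
 * offset is written to TG3PCI_REG_BASE_ADDR in PCI config space and the
 * data then moves through TG3PCI_REG_DATA, with tp->indirect_lock
 * serializing the two-step sequence.
 */
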
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

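/* Note that these accessors expand against a variable named tp, e.g.
 * tw32(MAC_MODE, val) becomes tp->write32(tp, MAC_MODE, val), so they are
 * only usable where a struct tg3 *tp is in scope. tw32_f() flushes the
 * posted write by reading the register back, and tw32_wait_f() adds the
 * usec settle time described above _tw32_flush().
 */
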
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

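/* The tg3_ape_* helpers below coordinate with the APE management firmware
 * through shared request/grant registers; each lock is a register pair
 * indexed by 4 * locknum, requested by setting a per-function bit and
 * granted when that same bit reads back from the grant register.
 */
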
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

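/* Callers bracket shared APE state with tg3_ape_lock()/tg3_ape_unlock(),
 * e.g. (this is the pattern tg3_ape_event_lock() below uses):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... access APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */
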
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

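/* Each MI transaction below polls MAC_MI_COM in 10 usec steps, so
 * PHY_BUSY_LOOPS bounds a single PHY register read or write to roughly
 * 5000 * 10 usec = 50 ms before it fails with -EBUSY.
 */
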
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

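/* Driver-to-firmware handshake: tg3_generate_fw_event() above sets
 * GRC_RX_CPU_DRIVER_EVENT and timestamps the event; before posting a new
 * command, tg3_wait_for_event_ack() below polls that same bit for up to
 * TG3_FW_EVENT_TIMEOUT_USEC until the firmware clears it.
 */
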
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

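/* The resulting 1000BASE-X pause advertisement, following the IEEE 802.3
 * pause resolution conventions:
 *
 *	TX RX  ->  PAUSE  ASYM_PAUSE
 *	 0  0        0        0
 *	 1  0        0        1
 *	 0  1        1        1
 *	 1  1        1        0
 */
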
static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

2289 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2294 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2297 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2299 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2300 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 static void tg3_phy_apply_otp(struct tg3 *tp)
2312 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2316 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2317 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2320 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2321 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2324 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2328 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2331 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2334 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337 tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2343 struct ethtool_eee *dest = &tp->eee;
2345 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2351 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354 /* Pull eee_active */
2355 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2356 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2357 dest->eee_active = 1;
2359 dest->eee_active = 0;
2361 /* Pull lp advertised settings */
2362 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2366 /* Pull advertised and eee_enabled settings */
2367 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369 dest->eee_enabled = !!val;
2370 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2372 /* Pull tx_lpi_enabled */
2373 val = tr32(TG3_CPMU_EEE_MODE);
2374 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376 /* Pull lpi timer value */
2377 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
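/* Only the low 16 bits of EEE_DBTMR1 hold the LPI timer; the upper
 * half carries the PCIe exit timer (see tg3_setup_eee() below) and
 * is masked off here.
 */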
2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2384 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2389 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391 tp->link_config.active_duplex == DUPLEX_FULL &&
2392 (tp->link_config.active_speed == SPEED_100 ||
2393 tp->link_config.active_speed == SPEED_1000)) {
2396 if (tp->link_config.active_speed == SPEED_1000)
2397 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403 tg3_eee_pull_config(tp, NULL);
2404 if (tp->eee.eee_active)
2408 if (!tp->setlpicnt) {
2409 if (current_link_up &&
2410 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2411 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2412 tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 val = tr32(TG3_CPMU_EEE_MODE);
2416 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2420 static void tg3_phy_eee_enable(struct tg3 *tp)
2424 if (tp->link_config.active_speed == SPEED_1000 &&
2425 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2426 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2427 tg3_flag(tp, 57765_CLASS)) &&
2428 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429 val = MII_TG3_DSP_TAP26_ALNOKO |
2430 MII_TG3_DSP_TAP26_RMRXSTO;
2431 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2432 tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 val = tr32(TG3_CPMU_EEE_MODE);
2436 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 static int tg3_wait_macro_done(struct tg3 *tp)
2446 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2447 if ((tmp32 & 0x1000) == 0)
2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 static const u32 test_pat[4][6] = {
2460 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2461 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2462 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2463 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2467 for (chan = 0; chan < 4; chan++) {
2470 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2471 (chan * 0x2000) | 0x0200);
2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474 for (i = 0; i < 6; i++)
2475 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2479 if (tg3_wait_macro_done(tp)) {
2484 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485 (chan * 0x2000) | 0x0200);
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2487 if (tg3_wait_macro_done(tp)) {
2492 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2493 if (tg3_wait_macro_done(tp)) {
2498 for (i = 0; i < 6; i += 2) {
2501 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2502 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2503 tg3_wait_macro_done(tp)) {
2509 if (low != test_pat[chan][i] ||
2510 high != test_pat[chan][i+1]) {
2511 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2512 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2523 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2527 for (chan = 0; chan < 4; chan++) {
2530 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2531 (chan * 0x2000) | 0x0200);
2532 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2533 for (i = 0; i < 6; i++)
2534 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2535 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2536 if (tg3_wait_macro_done(tp))
2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 u32 reg32, phy9_orig;
2546 int retries, do_phy_reset, err;
2552 err = tg3_bmcr_reset(tp);
2558 /* Disable transmitter and interrupt. */
2559 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2563 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565 /* Set full-duplex, 1000 Mbps. */
2566 tg3_writephy(tp, MII_BMCR,
2567 BMCR_FULLDPLX | BMCR_SPEED1000);
2569 /* Set to master mode. */
2570 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 tg3_writephy(tp, MII_CTRL1000,
2574 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2580 /* Block the PHY control access. */
2581 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 } while (--retries);
2588 err = tg3_phy_reset_chanpat(tp);
2592 tg3_phydsp_write(tp, 0x8005, 0x0000);
2594 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2595 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597 tg3_phy_toggle_auxctl_smdsp(tp, false);
2599 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2606 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2611 static void tg3_carrier_off(struct tg3 *tp)
2613 netif_carrier_off(tp->dev);
2614 tp->link_up = false;
2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 if (tg3_flag(tp, ENABLE_ASF))
2620 netdev_warn(tp->dev,
2621 "Management side-band traffic will be interrupted during phy settings change\n");
2624 /* This will unconditionally reset the tigon3 PHY. */
2627 static int tg3_phy_reset(struct tg3 *tp)
2632 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2633 val = tr32(GRC_MISC_CFG);
2634 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 err = tg3_readphy(tp, MII_BMSR, &val);
2638 err |= tg3_readphy(tp, MII_BMSR, &val);
2642 if (netif_running(tp->dev) && tp->link_up) {
2643 netif_carrier_off(tp->dev);
2644 tg3_link_report(tp);
2647 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2648 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5705) {
2650 err = tg3_phy_reset_5703_4_5(tp);
2657 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2658 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2659 cpmuctrl = tr32(TG3_CPMU_CTRL);
2660 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2662 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 err = tg3_bmcr_reset(tp);
2669 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2670 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2671 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673 tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2677 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2678 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2679 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2680 CPMU_LSPD_1000MB_MACCLK_12_5) {
2681 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2687 if (tg3_flag(tp, 5717_PLUS) &&
2688 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 tg3_phy_apply_otp(tp);
2693 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2694 tg3_phy_toggle_apd(tp, true);
2696 tg3_phy_toggle_apd(tp, false);
2699 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2700 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2702 tg3_phydsp_write(tp, 0x000a, 0x0323);
2703 tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2707 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2712 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 tg3_phydsp_write(tp, 0x000a, 0x310b);
2714 tg3_phydsp_write(tp, 0x201f, 0x9506);
2715 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2716 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2719 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2721 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2722 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2723 tg3_writephy(tp, MII_TG3_TEST1,
2724 MII_TG3_TEST1_TRIM_EN | 0x4);
2726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728 tg3_phy_toggle_auxctl_smdsp(tp, false);
2732 /* Set Extended packet length bit (bit 14) on all chips that
2733  * support jumbo frames. */
2734 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2735 /* Cannot do read-modify-write on 5401 */
2736 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2737 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738 /* Set bit 14 with read-modify-write to preserve other bits */
2739 err = tg3_phy_auxctl_read(tp,
2740 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2743 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2747 * jumbo frames transmission.
2749 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2751 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2752 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2756 /* adjust output voltage */
2757 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2761 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763 tg3_phy_toggle_automdix(tp, true);
2764 tg3_phy_set_wirespeed(tp);
2768 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2769 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2770 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2771 TG3_GPIO_MSG_NEED_VAUX)
2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2773 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2774 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 12))
2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2779 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2780 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 12))
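/* Each PCI function owns one 4-bit nibble of the GPIO message word:
 * function 0 uses bits 0-3, function 1 bits 4-7, and so on. For
 * example, TG3_GPIO_MSG_NEED_VAUX (bit 1) for function 2 lands at
 * bit 9, i.e. TG3_GPIO_MSG_NEED_VAUX << 8.
 */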
2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2788 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2789 tg3_asic_rev(tp) == ASIC_REV_5719)
2790 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792 status = tr32(TG3_CPMU_DRV_STATUS);
2794 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2795 status &= ~(TG3_GPIO_MSG_MASK << shift);
2796 status |= (newstat << shift);
2798 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2799 tg3_asic_rev(tp) == ASIC_REV_5719)
2800 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802 tw32(TG3_CPMU_DRV_STATUS, status);
2804 return status >> TG3_APE_GPIO_MSG_SHIFT;
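/* tg3_set_function_status() returns the aggregate status word
 * shifted back down, so callers such as tg3_frob_aux_power_5717()
 * can test the TG3_GPIO_MSG_ALL_*_MASK bits across all PCI
 * functions at once.
 */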
2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 if (!tg3_flag(tp, IS_NIC))
2812 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5720) {
2815 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2821 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826 TG3_GRC_LCLCTL_PWRSW_DELAY);
2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2836 if (!tg3_flag(tp, IS_NIC) ||
2837 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2838 tg3_asic_rev(tp) == ASIC_REV_5701)
2841 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843 tw32_wait_f(GRC_LOCAL_CTRL,
2844 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2845 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847 tw32_wait_f(GRC_LOCAL_CTRL,
2849 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851 tw32_wait_f(GRC_LOCAL_CTRL,
2852 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853 TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 if (!tg3_flag(tp, IS_NIC))
2861 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2862 tg3_asic_rev(tp) == ASIC_REV_5701) {
2863 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2864 (GRC_LCLCTRL_GPIO_OE0 |
2865 GRC_LCLCTRL_GPIO_OE1 |
2866 GRC_LCLCTRL_GPIO_OE2 |
2867 GRC_LCLCTRL_GPIO_OUTPUT0 |
2868 GRC_LCLCTRL_GPIO_OUTPUT1),
2869 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2871 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2872 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2873 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2874 GRC_LCLCTRL_GPIO_OE1 |
2875 GRC_LCLCTRL_GPIO_OE2 |
2876 GRC_LCLCTRL_GPIO_OUTPUT0 |
2877 GRC_LCLCTRL_GPIO_OUTPUT1 |
2879 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2880 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2883 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2884 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2887 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 u32 grc_local_ctrl = 0;
2893 /* Workaround to avoid drawing too much current. */
2894 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2895 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2896 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898 TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 /* On 5753 and variants, GPIO2 cannot be used. */
2902 no_gpio2 = tp->nic_sram_data_cfg &
2903 NIC_SRAM_DATA_CFG_NO_GPIO2;
2905 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2906 GRC_LCLCTRL_GPIO_OE1 |
2907 GRC_LCLCTRL_GPIO_OE2 |
2908 GRC_LCLCTRL_GPIO_OUTPUT1 |
2909 GRC_LCLCTRL_GPIO_OUTPUT2;
2911 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2912 GRC_LCLCTRL_GPIO_OUTPUT2);
2914 tw32_wait_f(GRC_LOCAL_CTRL,
2915 tp->grc_local_ctrl | grc_local_ctrl,
2916 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920 tw32_wait_f(GRC_LOCAL_CTRL,
2921 tp->grc_local_ctrl | grc_local_ctrl,
2922 TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2926 tw32_wait_f(GRC_LOCAL_CTRL,
2927 tp->grc_local_ctrl | grc_local_ctrl,
2928 TG3_GRC_LCLCTL_PWRSW_DELAY);
2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2937 /* Serialize power state transitions */
2938 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2942 msg = TG3_GPIO_MSG_NEED_VAUX;
2944 msg = tg3_set_function_status(tp, msg);
2946 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2950 tg3_pwrsrc_switch_to_vaux(tp);
2952 tg3_pwrsrc_die_with_vmain(tp);
2955 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 bool need_vaux = false;
2962 /* The GPIOs are repurposed on the 57765 class, so leave them alone here. */
2963 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2967 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5720) {
2969 tg3_frob_aux_power_5717(tp, include_wol ?
2970 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2974 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2975 struct net_device *dev_peer;
2977 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979 /* remove_one() may have been run on the peer. */
2981 struct tg3 *tp_peer = netdev_priv(dev_peer);
2983 if (tg3_flag(tp_peer, INIT_COMPLETE))
2986 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2987 tg3_flag(tp_peer, ENABLE_ASF))
2992 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2993 tg3_flag(tp, ENABLE_ASF))
2997 tg3_pwrsrc_switch_to_vaux(tp);
2999 tg3_pwrsrc_die_with_vmain(tp);
3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3007 if (speed != SPEED_10)
3009 } else if (speed == SPEED_10)
3015 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 switch (tg3_asic_rev(tp)) {
3022 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3031 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3040 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 switch (tg3_asic_rev(tp)) {
3045 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3058 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3062 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3063 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3064 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3068 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3069 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3074 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076 val = tr32(GRC_MISC_CFG);
3077 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 tg3_writephy(tp, MII_ADVERTISE, 0);
3086 tg3_writephy(tp, MII_BMCR,
3087 BMCR_ANENABLE | BMCR_ANRESTART);
3089 tg3_writephy(tp, MII_TG3_FET_TEST,
3090 phytest | MII_TG3_FET_SHADOW_EN);
3091 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3092 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3094 MII_TG3_FET_SHDW_AUXMODE4,
3097 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 } else if (do_low_power) {
3101 if (!tg3_phy_led_bug(tp))
3102 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3103 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3106 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3107 MII_TG3_AUXCTL_PCTL_VREG_11V;
3108 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 /* The PHY should not be powered down on some chips because of bugs. */
3114 if (tg3_phy_power_bug(tp))
3117 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3118 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3119 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3120 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3121 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3122 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 /* tp->lock is held. */
3129 static int tg3_nvram_lock(struct tg3 *tp)
3131 if (tg3_flag(tp, NVRAM)) {
3134 if (tp->nvram_lock_cnt == 0) {
3135 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3136 for (i = 0; i < 8000; i++) {
3137 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3142 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3146 tp->nvram_lock_cnt++;
3151 /* tp->lock is held. */
3152 static void tg3_nvram_unlock(struct tg3 *tp)
3154 if (tg3_flag(tp, NVRAM)) {
3155 if (tp->nvram_lock_cnt > 0)
3156 tp->nvram_lock_cnt--;
3157 if (tp->nvram_lock_cnt == 0)
3158 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3162 /* tp->lock is held. */
3163 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3166 u32 nvaccess = tr32(NVRAM_ACCESS);
3168 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3172 /* tp->lock is held. */
3173 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3176 u32 nvaccess = tr32(NVRAM_ACCESS);
3178 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3183 u32 offset, u32 *val)
3188 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3192 EEPROM_ADDR_DEVID_MASK |
3194 tw32(GRC_EEPROM_ADDR,
3196 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3197 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3198 EEPROM_ADDR_ADDR_MASK) |
3199 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201 for (i = 0; i < 1000; i++) {
3202 tmp = tr32(GRC_EEPROM_ADDR);
3204 if (tmp & EEPROM_ADDR_COMPLETE)
3208 if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 tmp = tr32(GRC_EEPROM_DATA);
3214 * The data will always be opposite the native endian
3215 * format. Perform a blind byteswap to compensate.
3222 #define NVRAM_CMD_TIMEOUT 10000
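/* With the usleep_range(10, 40) poll below, 10000 iterations bound
 * a single NVRAM command at roughly 100-400 ms before it is
 * declared failed.
 */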
3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3228 tw32(NVRAM_CMD, nvram_cmd);
3229 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3230 usleep_range(10, 40);
3231 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237 if (i == NVRAM_CMD_TIMEOUT)
3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 if (tg3_flag(tp, NVRAM) &&
3246 tg3_flag(tp, NVRAM_BUFFERED) &&
3247 tg3_flag(tp, FLASH) &&
3248 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3249 (tp->nvram_jedecnum == JEDEC_ATMEL))
3251 addr = ((addr / tp->nvram_pagesize) <<
3252 ATMEL_AT45DB0X1B_PAGE_POS) +
3253 (addr % tp->nvram_pagesize);
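/* Worked example (assuming the usual AT45DB0x1B geometry: 264-byte
 * pages and ATMEL_AT45DB0X1B_PAGE_POS == 9): logical address 1000
 * is page 3, offset 208, so the physical address becomes
 * (3 << 9) + 208 = 1744.
 */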
3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 if (tg3_flag(tp, NVRAM) &&
3261 tg3_flag(tp, NVRAM_BUFFERED) &&
3262 tg3_flag(tp, FLASH) &&
3263 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3264 (tp->nvram_jedecnum == JEDEC_ATMEL))
3266 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3267 tp->nvram_pagesize) +
3268 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
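/* Inverse of tg3_nvram_phys_addr() above: with the same assumed
 * geometry, physical address 1744 is page (1744 >> 9) = 3 at byte
 * offset (1744 & 511) = 208, giving logical 3 * 264 + 208 = 1000.
 */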
3273 /* NOTE: Data read in from NVRAM is byteswapped according to
3274 * the byteswapping settings for all other register accesses.
3275 * tg3 devices are BE devices, so on a BE machine, the data
3276 * returned will be exactly as it is seen in NVRAM. On a LE
3277 * machine, the 32-bit value will be byteswapped.
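 *
 * For example, if NVRAM holds the bytes de ad be ef, a BE host sees
 * *val == 0xdeadbeef while a LE host sees 0xefbeadde; use
 * tg3_nvram_read_be32() below when a fixed bytestream order matters.
 */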
3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3283 if (!tg3_flag(tp, NVRAM))
3284 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286 offset = tg3_nvram_phys_addr(tp, offset);
3288 if (offset > NVRAM_ADDR_MSK)
3291 ret = tg3_nvram_lock(tp);
3295 tg3_enable_nvram_access(tp);
3297 tw32(NVRAM_ADDR, offset);
3298 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3299 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302 *val = tr32(NVRAM_RDDATA);
3304 tg3_disable_nvram_access(tp);
3306 tg3_nvram_unlock(tp);
3311 /* Ensures NVRAM data is in bytestream format. */
3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 int res = tg3_nvram_read(tp, offset, &v);
3317 *val = cpu_to_be32(v);
3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3322 u32 offset, u32 len, u8 *buf)
3327 for (i = 0; i < len; i += 4) {
3333 memcpy(&data, buf + i, 4);
3336 * The SEEPROM interface expects the data to always be opposite
3337 * the native endian format. We accomplish this by reversing
3338 * all the operations that would have been performed on the
3339 * data from a call to tg3_nvram_read_be32().
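 * In effect, a block written here and then read back through
 * tg3_nvram_read_be32() compares equal to the original buffer.
 */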
3341 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343 val = tr32(GRC_EEPROM_ADDR);
3344 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348 tw32(GRC_EEPROM_ADDR, val |
3349 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3350 (addr & EEPROM_ADDR_ADDR_MASK) |
3354 for (j = 0; j < 1000; j++) {
3355 val = tr32(GRC_EEPROM_ADDR);
3357 if (val & EEPROM_ADDR_COMPLETE)
3361 if (!(val & EEPROM_ADDR_COMPLETE)) {
3370 /* offset and length are dword aligned */
3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3375 u32 pagesize = tp->nvram_pagesize;
3376 u32 pagemask = pagesize - 1;
3380 tmp = kmalloc(pagesize, GFP_KERNEL);
3386 u32 phy_addr, page_off, size;
3388 phy_addr = offset & ~pagemask;
3390 for (j = 0; j < pagesize; j += 4) {
3391 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3392 (__be32 *) (tmp + j));
3399 page_off = offset & pagemask;
3406 memcpy(tmp + page_off, buf, size);
3408 offset = offset + (pagesize - page_off);
3410 tg3_enable_nvram_access(tp);
3413 * Before we can erase the flash page, we need
3414 * to issue a special "write enable" command.
3416 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 /* Erase the target page */
3422 tw32(NVRAM_ADDR, phy_addr);
3424 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3425 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 /* Issue another write enable to start the write. */
3431 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 for (j = 0; j < pagesize; j += 4) {
3439 data = *((__be32 *) (tmp + j));
3441 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443 tw32(NVRAM_ADDR, phy_addr + j);
3445 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3449 nvram_cmd |= NVRAM_CMD_FIRST;
3450 else if (j == (pagesize - 4))
3451 nvram_cmd |= NVRAM_CMD_LAST;
3453 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3462 tg3_nvram_exec_cmd(tp, nvram_cmd);
3469 /* offset and length are dword aligned */
3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3475 for (i = 0; i < len; i += 4, offset += 4) {
3476 u32 page_off, phy_addr, nvram_cmd;
3479 memcpy(&data, buf + i, 4);
3480 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482 page_off = offset % tp->nvram_pagesize;
3484 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488 if (page_off == 0 || i == 0)
3489 nvram_cmd |= NVRAM_CMD_FIRST;
3490 if (page_off == (tp->nvram_pagesize - 4))
3491 nvram_cmd |= NVRAM_CMD_LAST;
3494 nvram_cmd |= NVRAM_CMD_LAST;
3496 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3497 !tg3_flag(tp, FLASH) ||
3498 !tg3_flag(tp, 57765_PLUS))
3499 tw32(NVRAM_ADDR, phy_addr);
3501 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3502 !tg3_flag(tp, 5755_PLUS) &&
3503 (tp->nvram_jedecnum == JEDEC_ST) &&
3504 (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3508 ret = tg3_nvram_exec_cmd(tp, cmd);
3512 if (!tg3_flag(tp, FLASH)) {
3513 /* We always do complete word writes to eeprom. */
3514 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3524 /* offset and length are dword aligned */
3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3529 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3530 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3531 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3535 if (!tg3_flag(tp, NVRAM)) {
3536 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3540 ret = tg3_nvram_lock(tp);
3544 tg3_enable_nvram_access(tp);
3545 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3546 tw32(NVRAM_WRITE1, 0x406);
3548 grc_mode = tr32(GRC_MODE);
3549 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3552 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3559 grc_mode = tr32(GRC_MODE);
3560 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562 tg3_disable_nvram_access(tp);
3563 tg3_nvram_unlock(tp);
3566 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3567 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3574 #define RX_CPU_SCRATCH_BASE 0x30000
3575 #define RX_CPU_SCRATCH_SIZE 0x04000
3576 #define TX_CPU_SCRATCH_BASE 0x34000
3577 #define TX_CPU_SCRATCH_SIZE 0x04000
3579 /* tp->lock is held. */
3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 const int iters = 10000;
3585 for (i = 0; i < iters; i++) {
3586 tw32(cpu_base + CPU_STATE, 0xffffffff);
3587 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3588 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590 if (pci_channel_offline(tp->pdev))
3594 return (i == iters) ? -EBUSY : 0;
3597 /* tp->lock is held. */
3598 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3603 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3609 /* tp->lock is held. */
3610 static int tg3_txcpu_pause(struct tg3 *tp)
3612 return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 /* tp->lock is held. */
3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 tw32(cpu_base + CPU_STATE, 0xffffffff);
3619 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3622 /* tp->lock is held. */
3623 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 tg3_resume_cpu(tp, RX_CPU_BASE);
3628 /* tp->lock is held. */
3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3633 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3636 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 if (cpu_base == RX_CPU_BASE) {
3642 rc = tg3_rxcpu_pause(tp);
3645 * There is only an Rx CPU for the 5750 derivative in the BCM4785. */
3648 if (tg3_flag(tp, IS_SSB_CORE))
3651 rc = tg3_txcpu_pause(tp);
3655 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3656 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3660 /* Clear firmware's nvram arbitration. */
3661 if (tg3_flag(tp, NVRAM))
3662 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3666 static int tg3_fw_data_len(struct tg3 *tp,
3667 const struct tg3_firmware_hdr *fw_hdr)
3671 /* Non-fragmented firmware has one firmware header followed by a
3672 * contiguous chunk of data to be written. The length field in that
3673 * header is not the length of the data to be written but the complete
3674 * length of the bss. The data length is therefore derived from
3675 * tp->fw->size minus the header size.
3677 * Fragmented firmware has a main header followed by multiple
3678 * fragments. Each fragment looks like a non-fragmented image:
3679 * a firmware header followed by a contiguous chunk of data. In
3680 * the main header, the length field is unused and set to 0xffffffff.
3681 * In each fragment header the length is the entire size of that
3682 * fragment, i.e. fragment data plus header length. The data length is
3683 * therefore the length field in the header minus TG3_FW_HDR_LEN.
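 *
 * For example, assuming TG3_FW_HDR_LEN is the 12-byte size of
 * struct tg3_firmware_hdr, a fragment header with len == 0x10c
 * contributes (0x10c - 12) / 4 = 64 data words.
 */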
3685 if (tp->fw_len == 0xffffffff)
3686 fw_len = be32_to_cpu(fw_hdr->len);
3688 fw_len = tp->fw->size;
3690 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 /* tp->lock is held. */
3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3695 u32 cpu_scratch_base, int cpu_scratch_size,
3696 const struct tg3_firmware_hdr *fw_hdr)
3699 void (*write_op)(struct tg3 *, u32, u32);
3700 int total_len = tp->fw->size;
3702 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 "%s: Trying to load TX cpu firmware which is 5705\n",
3709 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3710 write_op = tg3_write_mem;
3712 write_op = tg3_write_indirect_reg32;
3714 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3715 /* It is possible that bootcode is still loading at this point.
3716 * Get the nvram lock before halting the cpu. */
3718 int lock_err = tg3_nvram_lock(tp);
3719 err = tg3_halt_cpu(tp, cpu_base);
3721 tg3_nvram_unlock(tp);
3725 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3726 write_op(tp, cpu_scratch_base + i, 0);
3727 tw32(cpu_base + CPU_STATE, 0xffffffff);
3728 tw32(cpu_base + CPU_MODE,
3729 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 /* Subtract additional main header for fragmented firmware and
3732 * advance to the first fragment
3734 total_len -= TG3_FW_HDR_LEN;
3739 u32 *fw_data = (u32 *)(fw_hdr + 1);
3740 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3741 write_op(tp, cpu_scratch_base +
3742 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 be32_to_cpu(fw_data[i]));
3746 total_len -= be32_to_cpu(fw_hdr->len);
3748 /* Advance to next fragment */
3749 fw_hdr = (struct tg3_firmware_hdr *)
3750 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3751 } while (total_len > 0);
3759 /* tp->lock is held. */
3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 const int iters = 5;
3765 tw32(cpu_base + CPU_STATE, 0xffffffff);
3766 tw32_f(cpu_base + CPU_PC, pc);
3768 for (i = 0; i < iters; i++) {
3769 if (tr32(cpu_base + CPU_PC) == pc)
3771 tw32(cpu_base + CPU_STATE, 0xffffffff);
3772 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3773 tw32_f(cpu_base + CPU_PC, pc);
3777 return (i == iters) ? -EBUSY : 0;
3780 /* tp->lock is held. */
3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 const struct tg3_firmware_hdr *fw_hdr;
3786 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788 /* Firmware blob starts with version numbers, followed by
3789  * start address and length. We are setting complete length.
3790  * length = end_address_of_bss - start_address_of_text.
3791  * Remainder is the blob to be loaded contiguously
3792  * from start address. */
3794 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3795 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3800 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3801 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3806 /* Now start up only the RX cpu. */
3807 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3808 be32_to_cpu(fw_hdr->base_addr));
3810 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3811 "should be %08x\n", __func__,
3812 tr32(RX_CPU_BASE + CPU_PC),
3813 be32_to_cpu(fw_hdr->base_addr));
3817 tg3_rxcpu_resume(tp);
3822 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 const int iters = 1000;
3828 /* Wait for boot code to complete initialization and enter service
3829 * loop. It is then safe to download service patches. */
3831 for (i = 0; i < iters; i++) {
3832 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3839 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3843 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 netdev_warn(tp->dev,
3846 "Other patches exist. Not downloading EEE patch\n");
3853 /* tp->lock is held. */
3854 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 struct tg3_firmware_hdr *fw_hdr;
3858 if (!tg3_flag(tp, NO_NVRAM))
3861 if (tg3_validate_rxcpu_state(tp))
3867 /* This firmware blob has a different format than older firmware
3868 * releases as given below. The main difference is we have fragmented
3869 * data to be written to non-contiguous locations.
3871 * In the beginning we have a firmware header identical to other
3872 * firmware which consists of version, base addr and length. The length
3873 * here is unused and set to 0xffffffff.
3875 * This is followed by a series of firmware fragments which are
3876 * individually identical to previous firmware. i.e. they have the
3877 * firmware header and followed by data for that fragment. The version
3878 * field of the individual fragment header is unused.
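 *
 * Rough layout (a sketch, not to scale):
 *   main header     (len == 0xffffffff)
 *   fragment header (len == header + data bytes), fragment data
 *   fragment header, fragment data, ...
 */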
3881 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3882 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 if (tg3_rxcpu_pause(tp))
3888 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3889 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891 tg3_rxcpu_resume(tp);
3894 /* tp->lock is held. */
3895 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 const struct tg3_firmware_hdr *fw_hdr;
3898 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 if (!tg3_flag(tp, FW_TSO))
3904 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906 /* Firmware blob starts with version numbers, followed by
3907  * start address and length. We are setting complete length.
3908  * length = end_address_of_bss - start_address_of_text.
3909  * Remainder is the blob to be loaded contiguously
3910  * from start address. */
3912 cpu_scratch_size = tp->fw_len;
3914 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3915 cpu_base = RX_CPU_BASE;
3916 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 cpu_base = TX_CPU_BASE;
3919 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3920 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 err = tg3_load_firmware_cpu(tp, cpu_base,
3924 cpu_scratch_base, cpu_scratch_size,
3929 /* Now start up the cpu. */
3930 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3931 be32_to_cpu(fw_hdr->base_addr));
3934 "%s fails to set CPU PC, is %08x should be %08x\n",
3935 __func__, tr32(cpu_base + CPU_PC),
3936 be32_to_cpu(fw_hdr->base_addr));
3940 tg3_resume_cpu(tp, cpu_base);
3944 /* tp->lock is held. */
3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948 u32 addr_high, addr_low;
3950 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3951 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3952 (mac_addr[4] << 8) | mac_addr[5]);
3955 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3956 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3960 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
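/* Example: for MAC address 00:10:18:aa:bb:cc this writes
 * addr_high = 0x00000010 and addr_low = 0x18aabbcc, splitting the
 * six octets 2/4 across the HIGH/LOW register pair.
 */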
3964 /* tp->lock is held. */
3965 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3970 for (i = 0; i < 4; i++) {
3971 if (i == 1 && skip_mac_1)
3973 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3977 tg3_asic_rev(tp) == ASIC_REV_5704) {
3978 for (i = 4; i < 16; i++)
3979 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 addr_high = (tp->dev->dev_addr[0] +
3983 tp->dev->dev_addr[1] +
3984 tp->dev->dev_addr[2] +
3985 tp->dev->dev_addr[3] +
3986 tp->dev->dev_addr[4] +
3987 tp->dev->dev_addr[5]) &
3988 TX_BACKOFF_SEED_MASK;
3989 tw32(MAC_TX_BACKOFF_SEED, addr_high);
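/* The backoff seed is simply the sum of the six MAC address octets
 * masked with TX_BACKOFF_SEED_MASK, so distinct stations on a
 * half-duplex segment tend to pick distinct backoff slots.
 */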
3992 static void tg3_enable_register_access(struct tg3 *tp)
3995 * Make sure register accesses (indirect or otherwise) will function correctly. */
3998 pci_write_config_dword(tp->pdev,
3999 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 static int tg3_power_up(struct tg3 *tp)
4006 tg3_enable_register_access(tp);
4008 err = pci_set_power_state(tp->pdev, PCI_D0);
4010 /* Switch out of Vaux if it is a NIC */
4011 tg3_pwrsrc_switch_to_vmain(tp);
4013 netdev_err(tp->dev, "Transition to D0 failed\n");
4019 static int tg3_setup_phy(struct tg3 *, bool);
4021 static int tg3_power_down_prepare(struct tg3 *tp)
4024 bool device_should_wake, do_low_power;
4026 tg3_enable_register_access(tp);
4028 /* Restore the CLKREQ setting. */
4029 if (tg3_flag(tp, CLKREQ_BUG))
4030 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4031 PCI_EXP_LNKCTL_CLKREQ_EN);
4033 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4034 tw32(TG3PCI_MISC_HOST_CTRL,
4035 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4038 tg3_flag(tp, WOL_ENABLE);
4040 if (tg3_flag(tp, USE_PHYLIB)) {
4041 do_low_power = false;
4042 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4043 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4044 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4045 struct phy_device *phydev;
4048 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052 tp->link_config.speed = phydev->speed;
4053 tp->link_config.duplex = phydev->duplex;
4054 tp->link_config.autoneg = phydev->autoneg;
4055 ethtool_convert_link_mode_to_legacy_u32(
4056 &tp->link_config.advertising,
4057 phydev->advertising);
4059 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4068 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4069 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4081 linkmode_copy(phydev->advertising, advertising);
4082 phy_start_aneg(phydev);
4084 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4085 if (phyid != PHY_ID_BCMAC131) {
4086 phyid &= PHY_BCM_OUI_MASK;
4087 if (phyid == PHY_BCM_OUI_1 ||
4088 phyid == PHY_BCM_OUI_2 ||
4089 phyid == PHY_BCM_OUI_3)
4090 do_low_power = true;
4094 do_low_power = true;
4096 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4097 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4100 tg3_setup_phy(tp, false);
4103 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 val = tr32(GRC_VCPU_EXT_CTRL);
4107 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4108 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4112 for (i = 0; i < 200; i++) {
4113 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4114 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4119 if (tg3_flag(tp, WOL_CAP))
4120 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4121 WOL_DRV_STATE_SHUTDOWN |
4125 if (device_should_wake) {
4128 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4131 tg3_phy_auxctl_write(tp,
4132 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4133 MII_TG3_AUXCTL_PCTL_WOL_EN |
4134 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4135 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4139 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4140 mac_mode = MAC_MODE_PORT_MODE_GMII;
4141 else if (tp->phy_flags &
4142 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4143 if (tp->link_config.active_speed == SPEED_1000)
4144 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 mac_mode = MAC_MODE_PORT_MODE_MII;
4150 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4151 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4152 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4153 SPEED_100 : SPEED_10;
4154 if (tg3_5700_link_polarity(tp, speed))
4155 mac_mode |= MAC_MODE_LINK_POLARITY;
4157 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160 mac_mode = MAC_MODE_PORT_MODE_TBI;
4163 if (!tg3_flag(tp, 5750_PLUS))
4164 tw32(MAC_LED_CTRL, tp->led_ctrl);
4166 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4167 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4168 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4169 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171 if (tg3_flag(tp, ENABLE_APE))
4172 mac_mode |= MAC_MODE_APE_TX_EN |
4173 MAC_MODE_APE_RX_EN |
4174 MAC_MODE_TDE_ENABLE;
4176 tw32_f(MAC_MODE, mac_mode);
4179 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4183 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4184 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4185 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188 base_val = tp->pci_clock_ctrl;
4189 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4190 CLOCK_CTRL_TXCLK_DISABLE);
4192 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4193 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4194 } else if (tg3_flag(tp, 5780_CLASS) ||
4195 tg3_flag(tp, CPMU_PRESENT) ||
4196 tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4199 u32 newbits1, newbits2;
4201 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4202 tg3_asic_rev(tp) == ASIC_REV_5701) {
4203 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4204 CLOCK_CTRL_TXCLK_DISABLE |
4206 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4207 } else if (tg3_flag(tp, 5705_PLUS)) {
4208 newbits1 = CLOCK_CTRL_625_CORE;
4209 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 newbits1 = CLOCK_CTRL_ALTCLK;
4212 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221 if (!tg3_flag(tp, 5705_PLUS)) {
4224 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4225 tg3_asic_rev(tp) == ASIC_REV_5701) {
4226 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4227 CLOCK_CTRL_TXCLK_DISABLE |
4228 CLOCK_CTRL_44MHZ_CORE);
4230 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4234 tp->pci_clock_ctrl | newbits3, 40);
4238 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4239 tg3_power_down_phy(tp, do_low_power);
4241 tg3_frob_aux_power(tp, true);
4243 /* Workaround for unstable PLL clock */
4244 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4245 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4246 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4247 u32 val = tr32(0x7d00);
4249 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 if (!tg3_flag(tp, ENABLE_ASF)) {
4254 err = tg3_nvram_lock(tp);
4255 tg3_halt_cpu(tp, RX_CPU_BASE);
4257 tg3_nvram_unlock(tp);
4261 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4268 static void tg3_power_down(struct tg3 *tp)
4270 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4271 pci_set_power_state(tp->pdev, PCI_D3hot);
4274 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4277 case MII_TG3_AUX_STAT_10HALF:
4279 *duplex = DUPLEX_HALF;
4282 case MII_TG3_AUX_STAT_10FULL:
4284 *duplex = DUPLEX_FULL;
4287 case MII_TG3_AUX_STAT_100HALF:
4289 *duplex = DUPLEX_HALF;
4292 case MII_TG3_AUX_STAT_100FULL:
4294 *duplex = DUPLEX_FULL;
4297 case MII_TG3_AUX_STAT_1000HALF:
4298 *speed = SPEED_1000;
4299 *duplex = DUPLEX_HALF;
4302 case MII_TG3_AUX_STAT_1000FULL:
4303 *speed = SPEED_1000;
4304 *duplex = DUPLEX_FULL;
4308 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4309 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4315 *speed = SPEED_UNKNOWN;
4316 *duplex = DUPLEX_UNKNOWN;
4321 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4326 new_adv = ADVERTISE_CSMA;
4327 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4328 new_adv |= mii_advertise_flowctrl(flowctrl);
4330 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4334 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4335 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4338 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4339 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349 tw32(TG3_CPMU_EEE_MODE,
4350 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4357 /* Advertise 100-BaseTX EEE ability */
4358 if (advertise & ADVERTISED_100baseT_Full)
4359 val |= MDIO_AN_EEE_ADV_100TX;
4360 /* Advertise 1000-BaseT EEE ability */
4361 if (advertise & ADVERTISED_1000baseT_Full)
4362 val |= MDIO_AN_EEE_ADV_1000T;
4364 if (!tp->eee.eee_enabled) {
4366 tp->eee.advertised = 0;
4368 tp->eee.advertised = advertise &
4369 (ADVERTISED_100baseT_Full |
4370 ADVERTISED_1000baseT_Full);
4373 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4377 switch (tg3_asic_rev(tp)) {
4379 case ASIC_REV_57765:
4380 case ASIC_REV_57766:
4382 /* If we advertised any EEE modes above, apply the DSP workaround. */
4384 val = MII_TG3_DSP_TAP26_ALNOKO |
4385 MII_TG3_DSP_TAP26_RMRXSTO |
4386 MII_TG3_DSP_TAP26_OPCSINPT;
4387 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4391 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4392 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4393 MII_TG3_DSP_CH34TP2_HIBW01);
4396 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4405 static void tg3_phy_copper_begin(struct tg3 *tp)
4407 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4408 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4411 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4412 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4413 adv = ADVERTISED_10baseT_Half |
4414 ADVERTISED_10baseT_Full;
4415 if (tg3_flag(tp, WOL_SPEED_100MB))
4416 adv |= ADVERTISED_100baseT_Half |
4417 ADVERTISED_100baseT_Full;
4418 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4419 if (!(tp->phy_flags &
4420 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4421 adv |= ADVERTISED_1000baseT_Half;
4422 adv |= ADVERTISED_1000baseT_Full;
4425 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4427 adv = tp->link_config.advertising;
4428 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4429 adv &= ~(ADVERTISED_1000baseT_Half |
4430 ADVERTISED_1000baseT_Full);
4432 fc = tp->link_config.flowctrl;
4435 tg3_phy_autoneg_cfg(tp, adv, fc);
4437 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4438 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4439 /* Normally during power down we want to autonegotiate
4440 * the lowest possible speed for WOL. However, to avoid
4441 * link flap, we leave it untouched.
4446 tg3_writephy(tp, MII_BMCR,
4447 BMCR_ANENABLE | BMCR_ANRESTART);
4450 u32 bmcr, orig_bmcr;
4452 tp->link_config.active_speed = tp->link_config.speed;
4453 tp->link_config.active_duplex = tp->link_config.duplex;
4455 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4456 /* With autoneg disabled, 5715 only links up when the
4457 * advertisement register has the configured speed enabled. */
4460 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4464 switch (tp->link_config.speed) {
4470 bmcr |= BMCR_SPEED100;
4474 bmcr |= BMCR_SPEED1000;
4478 if (tp->link_config.duplex == DUPLEX_FULL)
4479 bmcr |= BMCR_FULLDPLX;
4481 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4482 (bmcr != orig_bmcr)) {
4483 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4484 for (i = 0; i < 1500; i++) {
4488 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4489 tg3_readphy(tp, MII_BMSR, &tmp))
4491 if (!(tmp & BMSR_LSTATUS)) {
4496 tg3_writephy(tp, MII_BMCR, bmcr);
4502 static int tg3_phy_pull_config(struct tg3 *tp)
4507 err = tg3_readphy(tp, MII_BMCR, &val);
4511 if (!(val & BMCR_ANENABLE)) {
4512 tp->link_config.autoneg = AUTONEG_DISABLE;
4513 tp->link_config.advertising = 0;
4514 tg3_flag_clear(tp, PAUSE_AUTONEG);
4518 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4520 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4523 tp->link_config.speed = SPEED_10;
4526 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4529 tp->link_config.speed = SPEED_100;
4531 case BMCR_SPEED1000:
4532 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4533 tp->link_config.speed = SPEED_1000;
4541 if (val & BMCR_FULLDPLX)
4542 tp->link_config.duplex = DUPLEX_FULL;
4544 tp->link_config.duplex = DUPLEX_HALF;
4546 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4552 tp->link_config.autoneg = AUTONEG_ENABLE;
4553 tp->link_config.advertising = ADVERTISED_Autoneg;
4554 tg3_flag_set(tp, PAUSE_AUTONEG);
4556 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4559 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4563 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4564 tp->link_config.advertising |= adv | ADVERTISED_TP;
4566 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4568 tp->link_config.advertising |= ADVERTISED_FIBRE;
4571 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4574 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4575 err = tg3_readphy(tp, MII_CTRL1000, &val);
4579 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4581 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4585 adv = tg3_decode_flowctrl_1000X(val);
4586 tp->link_config.flowctrl = adv;
4588 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4589 adv = mii_adv_to_ethtool_adv_x(val);
4592 tp->link_config.advertising |= adv;
4599 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4603 /* Turn off tap power management and set the
4604  * Extended packet length bit. */
4605 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4607 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4608 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4609 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4611 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4618 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4620 struct ethtool_eee eee;
4622 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4625 tg3_eee_pull_config(tp, &eee);
4627 if (tp->eee.eee_enabled) {
4628 if (tp->eee.advertised != eee.advertised ||
4629 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4630 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4633 /* EEE is disabled but we're advertising */
4641 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4643 u32 advmsk, tgtadv, advertising;
4645 advertising = tp->link_config.advertising;
4646 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4648 advmsk = ADVERTISE_ALL;
4649 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4650 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4651 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4654 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4657 if ((*lcladv & advmsk) != tgtadv)
4660 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4663 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4665 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4669 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4670 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4671 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4672 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4673 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4675 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4678 if (tg3_ctrl != tgtadv)
4685 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4689 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4692 if (tg3_readphy(tp, MII_STAT1000, &val))
4695 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4698 if (tg3_readphy(tp, MII_LPA, rmtadv))
4701 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4702 tp->link_config.rmt_adv = lpeth;
4707 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4709 if (curr_link_up != tp->link_up) {
4711 netif_carrier_on(tp->dev);
4713 netif_carrier_off(tp->dev);
4714 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4715 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4718 tg3_link_report(tp);
4725 static void tg3_clear_mac_status(struct tg3 *tp)
4730 MAC_STATUS_SYNC_CHANGED |
4731 MAC_STATUS_CFG_CHANGED |
4732 MAC_STATUS_MI_COMPLETION |
4733 MAC_STATUS_LNKSTATE_CHANGED);
4737 static void tg3_setup_eee(struct tg3 *tp)
4741 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4742 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4743 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4744 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4746 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4748 tw32_f(TG3_CPMU_EEE_CTRL,
4749 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4751 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4752 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4753 TG3_CPMU_EEEMD_LPI_IN_RX |
4754 TG3_CPMU_EEEMD_EEE_ENABLE;
4756 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4757 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4759 if (tg3_flag(tp, ENABLE_APE))
4760 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4762 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4764 tw32_f(TG3_CPMU_EEE_DBTMR1,
4765 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4766 (tp->eee.tx_lpi_timer & 0xffff));
4768 tw32_f(TG3_CPMU_EEE_DBTMR2,
4769 TG3_CPMU_DBTMR2_APE_TX_2047US |
4770 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4773 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4775 bool current_link_up;
4777 u32 lcl_adv, rmt_adv;
4782 tg3_clear_mac_status(tp);
4784 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4786 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4790 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4792 /* Some third-party PHYs need to be reset on link going down. */
4795 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4796 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4797 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4799 tg3_readphy(tp, MII_BMSR, &bmsr);
4800 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4801 !(bmsr & BMSR_LSTATUS))
4807 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4808 tg3_readphy(tp, MII_BMSR, &bmsr);
4809 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4810 !tg3_flag(tp, INIT_COMPLETE))
4813 if (!(bmsr & BMSR_LSTATUS)) {
4814 err = tg3_init_5401phy_dsp(tp);
4818 tg3_readphy(tp, MII_BMSR, &bmsr);
4819 for (i = 0; i < 1000; i++) {
4821 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4822 (bmsr & BMSR_LSTATUS)) {
4828 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4829 TG3_PHY_REV_BCM5401_B0 &&
4830 !(bmsr & BMSR_LSTATUS) &&
4831 tp->link_config.active_speed == SPEED_1000) {
4832 err = tg3_phy_reset(tp);
4834 err = tg3_init_5401phy_dsp(tp);
4839 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4840 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4841 /* 5701 {A0,B0} CRC bug workaround */
4842 tg3_writephy(tp, 0x15, 0x0a75);
4843 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848 /* Clear pending interrupts... */
4849 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4853 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4854 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4855 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4857 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4858 tg3_asic_rev(tp) == ASIC_REV_5701) {
4859 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4860 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4861 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4863 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4866 current_link_up = false;
4867 current_speed = SPEED_UNKNOWN;
4868 current_duplex = DUPLEX_UNKNOWN;
4869 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4870 tp->link_config.rmt_adv = 0;
4872 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4873 err = tg3_phy_auxctl_read(tp,
4874 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876 if (!err && !(val & (1 << 10))) {
4877 tg3_phy_auxctl_write(tp,
4878 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4885 for (i = 0; i < 100; i++) {
4886 tg3_readphy(tp, MII_BMSR, &bmsr);
4887 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4888 (bmsr & BMSR_LSTATUS))
4893 if (bmsr & BMSR_LSTATUS) {
4896 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4897 for (i = 0; i < 2000; i++) {
4899 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4904 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4909 for (i = 0; i < 200; i++) {
4910 tg3_readphy(tp, MII_BMCR, &bmcr);
4911 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4913 if (bmcr && bmcr != 0x7fff)
4921 tp->link_config.active_speed = current_speed;
4922 tp->link_config.active_duplex = current_duplex;
4924 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4925 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4927 if ((bmcr & BMCR_ANENABLE) &&
4929 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4930 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4931 current_link_up = true;
4933 /* Changes to EEE settings take effect only after a phy
4934 * reset. If we have skipped a reset due to Link Flap
4935 * Avoidance being enabled, do it now.
4937 if (!eee_config_ok &&
4938 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4944 if (!(bmcr & BMCR_ANENABLE) &&
4945 tp->link_config.speed == current_speed &&
4946 tp->link_config.duplex == current_duplex) {
4947 current_link_up = true;
4951 if (current_link_up &&
4952 tp->link_config.active_duplex == DUPLEX_FULL) {
4955 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4956 reg = MII_TG3_FET_GEN_STAT;
4957 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4959 reg = MII_TG3_EXT_STAT;
4960 bit = MII_TG3_EXT_STAT_MDIX;
4963 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4964 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4966 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4971 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4972 tg3_phy_copper_begin(tp);
4974 if (tg3_flag(tp, ROBOSWITCH)) {
4975 current_link_up = true;
4976 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4977 current_speed = SPEED_1000;
4978 current_duplex = DUPLEX_FULL;
4979 tp->link_config.active_speed = current_speed;
4980 tp->link_config.active_duplex = current_duplex;
4983 tg3_readphy(tp, MII_BMSR, &bmsr);
4984 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4985 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4986 current_link_up = true;
4989 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4990 if (current_link_up) {
4991 if (tp->link_config.active_speed == SPEED_100 ||
4992 tp->link_config.active_speed == SPEED_10)
4993 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4996 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4997 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001 /* In order for the 5750 core in the BCM4785 chip to work properly
5002 * in RGMII mode, the LED Control Register must be set up.
5004 if (tg3_flag(tp, RGMII_MODE)) {
5005 u32 led_ctrl = tr32(MAC_LED_CTRL);
5006 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5008 if (tp->link_config.active_speed == SPEED_10)
5009 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5010 else if (tp->link_config.active_speed == SPEED_100)
5011 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5012 LED_CTRL_100MBPS_ON);
5013 else if (tp->link_config.active_speed == SPEED_1000)
5014 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5015 LED_CTRL_1000MBPS_ON);
5017 tw32(MAC_LED_CTRL, led_ctrl);
5021 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5022 if (tp->link_config.active_duplex == DUPLEX_HALF)
5023 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5025 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5026 if (current_link_up &&
5027 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5028 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5030 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5033 /* ??? Without this setting Netgear GA302T PHY does not
5034 * ??? send/receive packets...
5036 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5037 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5038 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5039 tw32_f(MAC_MI_MODE, tp->mi_mode);
5043 tw32_f(MAC_MODE, tp->mac_mode);
5046 tg3_phy_eee_adjust(tp, current_link_up);
5048 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5049 /* Polled via timer. */
5050 tw32_f(MAC_EVENT, 0);
5052 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5056 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5058 tp->link_config.active_speed == SPEED_1000 &&
5059 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5062 (MAC_STATUS_SYNC_CHANGED |
5063 MAC_STATUS_CFG_CHANGED));
5066 NIC_SRAM_FIRMWARE_MBOX,
5067 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5070 /* Prevent send BD corruption. */
5071 if (tg3_flag(tp, CLKREQ_BUG)) {
5072 if (tp->link_config.active_speed == SPEED_100 ||
5073 tp->link_config.active_speed == SPEED_10)
5074 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5075 PCI_EXP_LNKCTL_CLKREQ_EN);
5077 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5078 PCI_EXP_LNKCTL_CLKREQ_EN);
5081 tg3_test_and_report_link_chg(tp, current_link_up);
5086 struct tg3_fiber_aneginfo {
5088 #define ANEG_STATE_UNKNOWN 0
5089 #define ANEG_STATE_AN_ENABLE 1
5090 #define ANEG_STATE_RESTART_INIT 2
5091 #define ANEG_STATE_RESTART 3
5092 #define ANEG_STATE_DISABLE_LINK_OK 4
5093 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5094 #define ANEG_STATE_ABILITY_DETECT 6
5095 #define ANEG_STATE_ACK_DETECT_INIT 7
5096 #define ANEG_STATE_ACK_DETECT 8
5097 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5098 #define ANEG_STATE_COMPLETE_ACK 10
5099 #define ANEG_STATE_IDLE_DETECT_INIT 11
5100 #define ANEG_STATE_IDLE_DETECT 12
5101 #define ANEG_STATE_LINK_OK 13
5102 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5103 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5106 #define MR_AN_ENABLE 0x00000001
5107 #define MR_RESTART_AN 0x00000002
5108 #define MR_AN_COMPLETE 0x00000004
5109 #define MR_PAGE_RX 0x00000008
5110 #define MR_NP_LOADED 0x00000010
5111 #define MR_TOGGLE_TX 0x00000020
5112 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5113 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5114 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5115 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5116 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5117 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5118 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5119 #define MR_TOGGLE_RX 0x00002000
5120 #define MR_NP_RX 0x00004000
5122 #define MR_LINK_OK 0x80000000
5124 unsigned long link_time, cur_time;
5126 u32 ability_match_cfg;
5127 int ability_match_count;
5129 char ability_match, idle_match, ack_match;
5131 u32 txconfig, rxconfig;
5132 #define ANEG_CFG_NP 0x00000080
5133 #define ANEG_CFG_ACK 0x00000040
5134 #define ANEG_CFG_RF2 0x00000020
5135 #define ANEG_CFG_RF1 0x00000010
5136 #define ANEG_CFG_PS2 0x00000001
5137 #define ANEG_CFG_PS1 0x00008000
5138 #define ANEG_CFG_HD 0x00004000
5139 #define ANEG_CFG_FD 0x00002000
5140 #define ANEG_CFG_INVAL 0x00001f06
5145 #define ANEG_TIMER_ENAB 2
5146 #define ANEG_FAILED -1
5148 #define ANEG_STATE_SETTLE_TIME 10000
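/* A sketch of the usual flow through the state machine below, for
 * orientation: AN_ENABLE -> RESTART_INIT -> RESTART ->
 * ABILITY_DETECT_INIT -> ABILITY_DETECT -> ACK_DETECT_INIT ->
 * ACK_DETECT -> COMPLETE_ACK_INIT -> COMPLETE_ACK ->
 * IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK.  ANEG_TIMER_ENAB is
 * returned whenever the machine has to sit out ANEG_STATE_SETTLE_TIME
 * ticks before advancing; a mismatch against the partner's config
 * words drops the machine back to AN_ENABLE.
 */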
5150 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5151 struct tg3_fiber_aneginfo *ap)
5154 unsigned long delta;
5158 if (ap->state == ANEG_STATE_UNKNOWN) {
5162 ap->ability_match_cfg = 0;
5163 ap->ability_match_count = 0;
5164 ap->ability_match = 0;
5170 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5171 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5173 if (rx_cfg_reg != ap->ability_match_cfg) {
5174 ap->ability_match_cfg = rx_cfg_reg;
5175 ap->ability_match = 0;
5176 ap->ability_match_count = 0;
5178 if (++ap->ability_match_count > 1) {
5179 ap->ability_match = 1;
5180 ap->ability_match_cfg = rx_cfg_reg;
5183 if (rx_cfg_reg & ANEG_CFG_ACK)
5191 ap->ability_match_cfg = 0;
5192 ap->ability_match_count = 0;
5193 ap->ability_match = 0;
5199 ap->rxconfig = rx_cfg_reg;
5202 switch (ap->state) {
5203 case ANEG_STATE_UNKNOWN:
5204 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5205 ap->state = ANEG_STATE_AN_ENABLE;
5208 case ANEG_STATE_AN_ENABLE:
5209 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5210 if (ap->flags & MR_AN_ENABLE) {
5213 ap->ability_match_cfg = 0;
5214 ap->ability_match_count = 0;
5215 ap->ability_match = 0;
5219 ap->state = ANEG_STATE_RESTART_INIT;
5221 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5225 case ANEG_STATE_RESTART_INIT:
5226 ap->link_time = ap->cur_time;
5227 ap->flags &= ~(MR_NP_LOADED);
5229 tw32(MAC_TX_AUTO_NEG, 0);
5230 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5231 tw32_f(MAC_MODE, tp->mac_mode);
5234 ret = ANEG_TIMER_ENAB;
5235 ap->state = ANEG_STATE_RESTART;
5238 case ANEG_STATE_RESTART:
5239 delta = ap->cur_time - ap->link_time;
5240 if (delta > ANEG_STATE_SETTLE_TIME)
5241 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5243 ret = ANEG_TIMER_ENAB;
5246 case ANEG_STATE_DISABLE_LINK_OK:
5250 case ANEG_STATE_ABILITY_DETECT_INIT:
5251 ap->flags &= ~(MR_TOGGLE_TX);
5252 ap->txconfig = ANEG_CFG_FD;
5253 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5254 if (flowctrl & ADVERTISE_1000XPAUSE)
5255 ap->txconfig |= ANEG_CFG_PS1;
5256 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5257 ap->txconfig |= ANEG_CFG_PS2;
5258 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5259 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5260 tw32_f(MAC_MODE, tp->mac_mode);
5263 ap->state = ANEG_STATE_ABILITY_DETECT;
5266 case ANEG_STATE_ABILITY_DETECT:
5267 if (ap->ability_match != 0 && ap->rxconfig != 0)
5268 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5271 case ANEG_STATE_ACK_DETECT_INIT:
5272 ap->txconfig |= ANEG_CFG_ACK;
5273 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5274 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5275 tw32_f(MAC_MODE, tp->mac_mode);
5278 ap->state = ANEG_STATE_ACK_DETECT;
5281 case ANEG_STATE_ACK_DETECT:
5282 if (ap->ack_match != 0) {
5283 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5284 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5285 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5287 ap->state = ANEG_STATE_AN_ENABLE;
5289 } else if (ap->ability_match != 0 &&
5290 ap->rxconfig == 0) {
5291 ap->state = ANEG_STATE_AN_ENABLE;
5295 case ANEG_STATE_COMPLETE_ACK_INIT:
5296 if (ap->rxconfig & ANEG_CFG_INVAL) {
5300 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5301 MR_LP_ADV_HALF_DUPLEX |
5302 MR_LP_ADV_SYM_PAUSE |
5303 MR_LP_ADV_ASYM_PAUSE |
5304 MR_LP_ADV_REMOTE_FAULT1 |
5305 MR_LP_ADV_REMOTE_FAULT2 |
5306 MR_LP_ADV_NEXT_PAGE |
5309 if (ap->rxconfig & ANEG_CFG_FD)
5310 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5311 if (ap->rxconfig & ANEG_CFG_HD)
5312 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5313 if (ap->rxconfig & ANEG_CFG_PS1)
5314 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5315 if (ap->rxconfig & ANEG_CFG_PS2)
5316 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5317 if (ap->rxconfig & ANEG_CFG_RF1)
5318 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5319 if (ap->rxconfig & ANEG_CFG_RF2)
5320 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5321 if (ap->rxconfig & ANEG_CFG_NP)
5322 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5324 ap->link_time = ap->cur_time;
5326 ap->flags ^= (MR_TOGGLE_TX);
5327 if (ap->rxconfig & 0x0008)
5328 ap->flags |= MR_TOGGLE_RX;
5329 if (ap->rxconfig & ANEG_CFG_NP)
5330 ap->flags |= MR_NP_RX;
5331 ap->flags |= MR_PAGE_RX;
5333 ap->state = ANEG_STATE_COMPLETE_ACK;
5334 ret = ANEG_TIMER_ENAB;
5337 case ANEG_STATE_COMPLETE_ACK:
5338 if (ap->ability_match != 0 &&
5339 ap->rxconfig == 0) {
5340 ap->state = ANEG_STATE_AN_ENABLE;
5343 delta = ap->cur_time - ap->link_time;
5344 if (delta > ANEG_STATE_SETTLE_TIME) {
5345 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5346 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5349 !(ap->flags & MR_NP_RX)) {
5350 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5358 case ANEG_STATE_IDLE_DETECT_INIT:
5359 ap->link_time = ap->cur_time;
5360 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5361 tw32_f(MAC_MODE, tp->mac_mode);
5364 ap->state = ANEG_STATE_IDLE_DETECT;
5365 ret = ANEG_TIMER_ENAB;
5368 case ANEG_STATE_IDLE_DETECT:
5369 if (ap->ability_match != 0 &&
5370 ap->rxconfig == 0) {
5371 ap->state = ANEG_STATE_AN_ENABLE;
5374 delta = ap->cur_time - ap->link_time;
5375 if (delta > ANEG_STATE_SETTLE_TIME) {
5376 /* XXX another gem from the Broadcom driver :( */
5377 ap->state = ANEG_STATE_LINK_OK;
5381 case ANEG_STATE_LINK_OK:
5382 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5386 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5387 /* ??? unimplemented */
5390 case ANEG_STATE_NEXT_PAGE_WAIT:
5391 /* ??? unimplemented */
5402 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5405 struct tg3_fiber_aneginfo aninfo;
5406 int status = ANEG_FAILED;
5410 tw32_f(MAC_TX_AUTO_NEG, 0);
5412 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5413 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5416 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5419 memset(&aninfo, 0, sizeof(aninfo));
5420 aninfo.flags |= MR_AN_ENABLE;
5421 aninfo.state = ANEG_STATE_UNKNOWN;
5422 aninfo.cur_time = 0;
5424 while (++tick < 195000) {
5425 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5426 if (status == ANEG_DONE || status == ANEG_FAILED)
5432 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5433 tw32_f(MAC_MODE, tp->mac_mode);
5436 *txflags = aninfo.txconfig;
5437 *rxflags = aninfo.flags;
5439 if (status == ANEG_DONE &&
5440 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5441 MR_LP_ADV_FULL_DUPLEX)))
5447 static void tg3_init_bcm8002(struct tg3 *tp)
5449 u32 mac_status = tr32(MAC_STATUS);
5452 /* Reset when initializing for the first time or when we have a link. */
5453 if (tg3_flag(tp, INIT_COMPLETE) &&
5454 !(mac_status & MAC_STATUS_PCS_SYNCED))
5457 /* Set PLL lock range. */
5458 tg3_writephy(tp, 0x16, 0x8007);
5461 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5463 /* Wait for reset to complete. */
5464 /* XXX schedule_timeout() ... */
5465 for (i = 0; i < 500; i++)
5468 /* Config mode; select PMA/Ch 1 regs. */
5469 tg3_writephy(tp, 0x10, 0x8411);
5471 /* Enable auto-lock and comdet, select txclk for tx. */
5472 tg3_writephy(tp, 0x11, 0x0a10);
5474 tg3_writephy(tp, 0x18, 0x00a0);
5475 tg3_writephy(tp, 0x16, 0x41ff);
5477 /* Assert and deassert POR. */
5478 tg3_writephy(tp, 0x13, 0x0400);
5480 tg3_writephy(tp, 0x13, 0x0000);
5482 tg3_writephy(tp, 0x11, 0x0a50);
5484 tg3_writephy(tp, 0x11, 0x0a10);
5486 /* Wait for signal to stabilize */
5487 /* XXX schedule_timeout() ... */
5488 for (i = 0; i < 15000; i++)
5491 /* Deselect the channel register so we can read the PHYID
5494 tg3_writephy(tp, 0x10, 0x8011);
5497 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5500 bool current_link_up;
5501 u32 sg_dig_ctrl, sg_dig_status;
5502 u32 serdes_cfg, expected_sg_dig_ctrl;
5503 int workaround, port_a;
5508 current_link_up = false;
5510 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5511 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5513 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5516 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5517 /* preserve bits 20-23 for voltage regulator */
5518 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5521 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5523 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5524 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5526 u32 val = serdes_cfg;
5532 tw32_f(MAC_SERDES_CFG, val);
5535 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5537 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5538 tg3_setup_flow_control(tp, 0, 0);
5539 current_link_up = true;
5544 /* Want auto-negotiation. */
5545 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5547 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548 if (flowctrl & ADVERTISE_1000XPAUSE)
5549 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5550 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5551 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5553 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5554 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5555 tp->serdes_counter &&
5556 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5557 MAC_STATUS_RCVD_CFG)) ==
5558 MAC_STATUS_PCS_SYNCED)) {
5559 tp->serdes_counter--;
5560 current_link_up = true;
5565 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5566 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5568 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5570 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5571 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5572 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5573 MAC_STATUS_SIGNAL_DET)) {
5574 sg_dig_status = tr32(SG_DIG_STATUS);
5575 mac_status = tr32(MAC_STATUS);
5577 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5578 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5579 u32 local_adv = 0, remote_adv = 0;
5581 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5582 local_adv |= ADVERTISE_1000XPAUSE;
5583 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5584 local_adv |= ADVERTISE_1000XPSE_ASYM;
5586 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5587 remote_adv |= LPA_1000XPAUSE;
5588 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5589 remote_adv |= LPA_1000XPAUSE_ASYM;
5591 tp->link_config.rmt_adv =
5592 mii_adv_to_ethtool_adv_x(remote_adv);
5594 tg3_setup_flow_control(tp, local_adv, remote_adv);
5595 current_link_up = true;
5596 tp->serdes_counter = 0;
5597 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5598 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5599 if (tp->serdes_counter)
5600 tp->serdes_counter--;
5603 u32 val = serdes_cfg;
5610 tw32_f(MAC_SERDES_CFG, val);
5613 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5617 /* Link parallel detection: the link is up only if we
5618 * have PCS_SYNC and are not receiving config code words.
5619 */
5619 mac_status = tr32(MAC_STATUS);
5620 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5621 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5622 tg3_setup_flow_control(tp, 0, 0);
5623 current_link_up = true;
5625 TG3_PHYFLG_PARALLEL_DETECT;
5626 tp->serdes_counter =
5627 SERDES_PARALLEL_DET_TIMEOUT;
5629 goto restart_autoneg;
5633 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5634 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5638 return current_link_up;
5641 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5643 bool current_link_up = false;
5645 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5648 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5649 u32 txflags, rxflags;
5652 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5653 u32 local_adv = 0, remote_adv = 0;
5655 if (txflags & ANEG_CFG_PS1)
5656 local_adv |= ADVERTISE_1000XPAUSE;
5657 if (txflags & ANEG_CFG_PS2)
5658 local_adv |= ADVERTISE_1000XPSE_ASYM;
5660 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5661 remote_adv |= LPA_1000XPAUSE;
5662 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5663 remote_adv |= LPA_1000XPAUSE_ASYM;
5665 tp->link_config.rmt_adv =
5666 mii_adv_to_ethtool_adv_x(remote_adv);
5668 tg3_setup_flow_control(tp, local_adv, remote_adv);
5670 current_link_up = true;
5672 for (i = 0; i < 30; i++) {
5675 (MAC_STATUS_SYNC_CHANGED |
5676 MAC_STATUS_CFG_CHANGED));
5678 if ((tr32(MAC_STATUS) &
5679 (MAC_STATUS_SYNC_CHANGED |
5680 MAC_STATUS_CFG_CHANGED)) == 0)
5684 mac_status = tr32(MAC_STATUS);
5685 if (!current_link_up &&
5686 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5687 !(mac_status & MAC_STATUS_RCVD_CFG))
5688 current_link_up = true;
5690 tg3_setup_flow_control(tp, 0, 0);
5692 /* Forcing 1000FD link up. */
5693 current_link_up = true;
5695 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5698 tw32_f(MAC_MODE, tp->mac_mode);
5703 return current_link_up;
5706 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5709 u32 orig_active_speed;
5710 u8 orig_active_duplex;
5712 bool current_link_up;
5715 orig_pause_cfg = tp->link_config.active_flowctrl;
5716 orig_active_speed = tp->link_config.active_speed;
5717 orig_active_duplex = tp->link_config.active_duplex;
5719 if (!tg3_flag(tp, HW_AUTONEG) &&
5721 tg3_flag(tp, INIT_COMPLETE)) {
5722 mac_status = tr32(MAC_STATUS);
5723 mac_status &= (MAC_STATUS_PCS_SYNCED |
5724 MAC_STATUS_SIGNAL_DET |
5725 MAC_STATUS_CFG_CHANGED |
5726 MAC_STATUS_RCVD_CFG);
5727 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5728 MAC_STATUS_SIGNAL_DET)) {
5729 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5730 MAC_STATUS_CFG_CHANGED));
5735 tw32_f(MAC_TX_AUTO_NEG, 0);
5737 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5738 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5739 tw32_f(MAC_MODE, tp->mac_mode);
5742 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5743 tg3_init_bcm8002(tp);
5745 /* Enable link change event even when serdes polling. */
5746 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5749 tp->link_config.rmt_adv = 0;
5750 mac_status = tr32(MAC_STATUS);
5752 if (tg3_flag(tp, HW_AUTONEG))
5753 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757 tp->napi[0].hw_status->status =
5758 (SD_STATUS_UPDATED |
5759 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761 for (i = 0; i < 100; i++) {
5762 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5763 MAC_STATUS_CFG_CHANGED));
5765 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5766 MAC_STATUS_CFG_CHANGED |
5767 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5771 mac_status = tr32(MAC_STATUS);
5772 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5773 current_link_up = false;
5774 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5775 tp->serdes_counter == 0) {
5776 tw32_f(MAC_MODE, (tp->mac_mode |
5777 MAC_MODE_SEND_CONFIGS));
5779 tw32_f(MAC_MODE, tp->mac_mode);
5783 if (current_link_up) {
5784 tp->link_config.active_speed = SPEED_1000;
5785 tp->link_config.active_duplex = DUPLEX_FULL;
5786 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5787 LED_CTRL_LNKLED_OVERRIDE |
5788 LED_CTRL_1000MBPS_ON));
5790 tp->link_config.active_speed = SPEED_UNKNOWN;
5791 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5792 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793 LED_CTRL_LNKLED_OVERRIDE |
5794 LED_CTRL_TRAFFIC_OVERRIDE));
5797 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5798 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5799 if (orig_pause_cfg != now_pause_cfg ||
5800 orig_active_speed != tp->link_config.active_speed ||
5801 orig_active_duplex != tp->link_config.active_duplex)
5802 tg3_link_report(tp);
5808 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5812 u32 current_speed = SPEED_UNKNOWN;
5813 u8 current_duplex = DUPLEX_UNKNOWN;
5814 bool current_link_up = false;
5815 u32 local_adv, remote_adv, sgsr;
5817 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5818 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5819 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5820 (sgsr & SERDES_TG3_SGMII_MODE)) {
5825 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5828 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830 current_link_up = true;
5831 if (sgsr & SERDES_TG3_SPEED_1000) {
5832 current_speed = SPEED_1000;
5833 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5834 } else if (sgsr & SERDES_TG3_SPEED_100) {
5835 current_speed = SPEED_100;
5836 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838 current_speed = SPEED_10;
5839 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5842 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5843 current_duplex = DUPLEX_FULL;
5845 current_duplex = DUPLEX_HALF;
5848 tw32_f(MAC_MODE, tp->mac_mode);
5851 tg3_clear_mac_status(tp);
5853 goto fiber_setup_done;
5856 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5857 tw32_f(MAC_MODE, tp->mac_mode);
5860 tg3_clear_mac_status(tp);
5865 tp->link_config.rmt_adv = 0;
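/* The BMSR link-status bit is latched-low per IEEE 802.3, which is why
 * the register is read twice back to back here and elsewhere in this
 * file: the first read clears a stale link-down latch, the second
 * reflects the current state.
 */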
5867 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5870 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5871 bmsr |= BMSR_LSTATUS;
5873 bmsr &= ~BMSR_LSTATUS;
5876 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5879 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5880 /* do nothing, just check for link up at the end */
5881 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5884 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5885 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5886 ADVERTISE_1000XPAUSE |
5887 ADVERTISE_1000XPSE_ASYM |
5890 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5891 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5894 tg3_writephy(tp, MII_ADVERTISE, newadv);
5895 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5896 tg3_writephy(tp, MII_BMCR, bmcr);
5898 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5899 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5900 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5907 bmcr &= ~BMCR_SPEED1000;
5908 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910 if (tp->link_config.duplex == DUPLEX_FULL)
5911 new_bmcr |= BMCR_FULLDPLX;
5913 if (new_bmcr != bmcr) {
5914 /* BMCR_SPEED1000 is a reserved bit that needs
5915 * to be set on write.
5917 new_bmcr |= BMCR_SPEED1000;
5919 /* Force a linkdown */
5923 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5924 adv &= ~(ADVERTISE_1000XFULL |
5925 ADVERTISE_1000XHALF |
5927 tg3_writephy(tp, MII_ADVERTISE, adv);
5928 tg3_writephy(tp, MII_BMCR, bmcr |
5932 tg3_carrier_off(tp);
5934 tg3_writephy(tp, MII_BMCR, new_bmcr);
5936 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5939 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5940 bmsr |= BMSR_LSTATUS;
5942 bmsr &= ~BMSR_LSTATUS;
5944 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5948 if (bmsr & BMSR_LSTATUS) {
5949 current_speed = SPEED_1000;
5950 current_link_up = true;
5951 if (bmcr & BMCR_FULLDPLX)
5952 current_duplex = DUPLEX_FULL;
5954 current_duplex = DUPLEX_HALF;
5959 if (bmcr & BMCR_ANENABLE) {
5962 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5963 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5964 common = local_adv & remote_adv;
5965 if (common & (ADVERTISE_1000XHALF |
5966 ADVERTISE_1000XFULL)) {
5967 if (common & ADVERTISE_1000XFULL)
5968 current_duplex = DUPLEX_FULL;
5970 current_duplex = DUPLEX_HALF;
5972 tp->link_config.rmt_adv =
5973 mii_adv_to_ethtool_adv_x(remote_adv);
5974 } else if (!tg3_flag(tp, 5780_CLASS)) {
5975 /* Link is up via parallel detect */
5977 current_link_up = false;
5983 if (current_link_up && current_duplex == DUPLEX_FULL)
5984 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5987 if (tp->link_config.active_duplex == DUPLEX_HALF)
5988 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990 tw32_f(MAC_MODE, tp->mac_mode);
5993 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995 tp->link_config.active_speed = current_speed;
5996 tp->link_config.active_duplex = current_duplex;
5998 tg3_test_and_report_link_chg(tp, current_link_up);
6002 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 if (tp->serdes_counter) {
6005 /* Give autoneg time to complete. */
6006 tp->serdes_counter--;
6011 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6014 tg3_readphy(tp, MII_BMCR, &bmcr);
6015 if (bmcr & BMCR_ANENABLE) {
6018 /* Select shadow register 0x1f */
6019 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6020 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022 /* Select expansion interrupt status register */
6023 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6024 MII_TG3_DSP_EXP1_INT_STAT);
6025 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6029 /* We have signal detect and not receiving
6030 * config code words, link is up by parallel
6034 bmcr &= ~BMCR_ANENABLE;
6035 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6036 tg3_writephy(tp, MII_BMCR, bmcr);
6037 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6040 } else if (tp->link_up &&
6041 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6042 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6045 /* Select expansion interrupt status register */
6046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6047 MII_TG3_DSP_EXP1_INT_STAT);
6048 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6052 /* Config code words received, turn on autoneg. */
6053 tg3_readphy(tp, MII_BMCR, &bmcr);
6054 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6062 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6067 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6068 err = tg3_setup_fiber_phy(tp, force_reset);
6069 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6070 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072 err = tg3_setup_copper_phy(tp, force_reset);
6074 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6077 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6078 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6085 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6086 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6087 tw32(GRC_MISC_CFG, val);
6090 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6091 (6 << TX_LENGTHS_IPG_SHIFT);
6092 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6093 tg3_asic_rev(tp) == ASIC_REV_5762)
6094 val |= tr32(MAC_TX_LENGTHS) &
6095 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6096 TX_LENGTHS_CNT_DWN_VAL_MSK);
6098 if (tp->link_config.active_speed == SPEED_1000 &&
6099 tp->link_config.active_duplex == DUPLEX_HALF)
6100 tw32(MAC_TX_LENGTHS, val |
6101 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103 tw32(MAC_TX_LENGTHS, val |
6104 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106 if (!tg3_flag(tp, 5705_PLUS)) {
6108 tw32(HOSTCC_STAT_COAL_TICKS,
6109 tp->coal.stats_block_coalesce_usecs);
6111 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6115 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6116 val = tr32(PCIE_PWR_MGMT_THRESH);
6118 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6121 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6122 tw32(PCIE_PWR_MGMT_THRESH, val);
6128 /* tp->lock must be held */
6129 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6133 ptp_read_system_prets(sts);
6134 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6135 ptp_read_system_postts(sts);
6136 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
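/* Note that only the LSB read is bracketed by the system-time
 * snapshots above, so the device/system correlation window spans a
 * single MMIO read rather than both register accesses.
 */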
6141 /* tp->lock must be held */
6142 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6147 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6148 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6149 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6152 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6153 static inline void tg3_full_unlock(struct tg3 *tp);
6154 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 struct tg3 *tp = netdev_priv(dev);
6158 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6159 SOF_TIMESTAMPING_RX_SOFTWARE |
6160 SOF_TIMESTAMPING_SOFTWARE;
6162 if (tg3_flag(tp, PTP_CAPABLE)) {
6163 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6164 SOF_TIMESTAMPING_RX_HARDWARE |
6165 SOF_TIMESTAMPING_RAW_HARDWARE;
6169 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171 info->phc_index = -1;
6173 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6176 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6177 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
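/* User space retrieves this via the ETHTOOL_GET_TS_INFO request, e.g.
 * "ethtool -T <iface>" (interface name illustrative).
 */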
6182 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6184 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6188 /* Frequency adjustment is performed using hardware with a 24 bit
6189 * accumulator and a programmable correction value. On each clk, the
6190 * correction value gets added to the accumulator and when it
6191 * overflows, the time counter is incremented/decremented.
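 *
 * Worked example (illustrative numbers): with a 2^24 accumulator,
 * slowing the clock by 100 ppm needs a correction of roughly
 * 16777216 * 100 / 1000000 ~= 1678 with neg_adj set;
 * diff_by_scaled_ppm() performs this conversion from scaled_ppm's
 * 16-bit-fractional ppm units.
 */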
6193 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6195 tg3_full_lock(tp, 0);
6198 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6199 TG3_EAV_REF_CLK_CORRECT_EN |
6200 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6201 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6203 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6205 tg3_full_unlock(tp);
6210 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6212 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6214 tg3_full_lock(tp, 0);
6215 tp->ptp_adjust += delta;
6216 tg3_full_unlock(tp);
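/* Design note: adjtime is a pure software offset; the hardware counter
 * keeps running and tp->ptp_adjust is folded in at read time in
 * tg3_ptp_gettimex() below.
 */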
6221 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6222 struct ptp_system_timestamp *sts)
6225 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6227 tg3_full_lock(tp, 0);
6228 ns = tg3_refclk_read(tp, sts);
6229 ns += tp->ptp_adjust;
6230 tg3_full_unlock(tp);
6232 *ts = ns_to_timespec64(ns);
6237 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6238 const struct timespec64 *ts)
6241 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243 ns = timespec64_to_ns(ts);
6245 tg3_full_lock(tp, 0);
6246 tg3_refclk_write(tp, ns);
6248 tg3_full_unlock(tp);
6253 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6254 struct ptp_clock_request *rq, int on)
6256 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6261 case PTP_CLK_REQ_PEROUT:
6262 /* Reject requests with unsupported flags */
6263 if (rq->perout.flags)
6266 if (rq->perout.index != 0)
6269 tg3_full_lock(tp, 0);
6270 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6271 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6276 nsec = rq->perout.start.sec * 1000000000ULL +
6277 rq->perout.start.nsec;
6279 if (rq->perout.period.sec || rq->perout.period.nsec) {
6280 netdev_warn(tp->dev,
6281 "Device supports only a one-shot timesync output, period must be 0\n");
6286 if (nsec & (1ULL << 63)) {
6287 netdev_warn(tp->dev,
6288 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6293 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6294 tw32(TG3_EAV_WATCHDOG0_MSB,
6295 TG3_EAV_WATCHDOG0_EN |
6296 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6298 tw32(TG3_EAV_REF_CLCK_CTL,
6299 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6301 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6302 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6306 tg3_full_unlock(tp);
6316 static const struct ptp_clock_info tg3_ptp_caps = {
6317 .owner = THIS_MODULE,
6318 .name = "tg3 clock",
6319 .max_adj = 250000000,
6325 .adjfine = tg3_ptp_adjfine,
6326 .adjtime = tg3_ptp_adjtime,
6327 .gettimex64 = tg3_ptp_gettimex,
6328 .settime64 = tg3_ptp_settime,
6329 .enable = tg3_ptp_enable,
6332 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6333 struct skb_shared_hwtstamps *timestamp)
6335 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6336 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6340 /* tp->lock must be held */
6341 static void tg3_ptp_init(struct tg3 *tp)
6343 if (!tg3_flag(tp, PTP_CAPABLE))
6346 /* Initialize the hardware clock to the system time. */
6347 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6349 tp->ptp_info = tg3_ptp_caps;
6352 /* tp->lock must be held */
6353 static void tg3_ptp_resume(struct tg3 *tp)
6355 if (!tg3_flag(tp, PTP_CAPABLE))
6358 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6362 static void tg3_ptp_fini(struct tg3 *tp)
6364 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6367 ptp_clock_unregister(tp->ptp_clock);
6368 tp->ptp_clock = NULL;
6372 static inline int tg3_irq_sync(struct tg3 *tp)
6374 return tp->irq_sync;
6377 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6381 dst = (u32 *)((u8 *)dst + off);
6382 for (i = 0; i < len; i += sizeof(u32))
6383 *dst++ = tr32(off + i);
6386 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6388 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6389 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6390 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6391 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6392 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6393 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6394 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6395 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6396 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6397 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6398 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6399 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6400 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6401 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6402 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6403 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6404 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6405 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6406 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6408 if (tg3_flag(tp, SUPPORT_MSIX))
6409 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6411 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6412 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6413 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6414 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6415 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6416 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6417 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6418 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6420 if (!tg3_flag(tp, 5705_PLUS)) {
6421 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6422 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6423 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6426 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6427 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6428 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6429 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6430 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6432 if (tg3_flag(tp, NVRAM))
6433 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6436 static void tg3_dump_state(struct tg3 *tp)
6441 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6445 if (tg3_flag(tp, PCI_EXPRESS)) {
6446 /* Read up to but not including private PCI registers */
6447 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6448 regs[i / sizeof(u32)] = tr32(i);
6450 tg3_dump_legacy_regs(tp, regs);
6452 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6453 if (!regs[i + 0] && !regs[i + 1] &&
6454 !regs[i + 2] && !regs[i + 3])
6457 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6459 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6464 for (i = 0; i < tp->irq_cnt; i++) {
6465 struct tg3_napi *tnapi = &tp->napi[i];
6467 /* SW status block */
6469 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6471 tnapi->hw_status->status,
6472 tnapi->hw_status->status_tag,
6473 tnapi->hw_status->rx_jumbo_consumer,
6474 tnapi->hw_status->rx_consumer,
6475 tnapi->hw_status->rx_mini_consumer,
6476 tnapi->hw_status->idx[0].rx_producer,
6477 tnapi->hw_status->idx[0].tx_consumer);
6480 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6482 tnapi->last_tag, tnapi->last_irq_tag,
6483 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6485 tnapi->prodring.rx_std_prod_idx,
6486 tnapi->prodring.rx_std_cons_idx,
6487 tnapi->prodring.rx_jmb_prod_idx,
6488 tnapi->prodring.rx_jmb_cons_idx);
6492 /* This is called whenever we suspect that the system chipset is re-
6493 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6494 * is bogus tx completions. We try to recover by setting the
6495 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6498 static void tg3_tx_recover(struct tg3 *tp)
6500 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6501 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6503 netdev_warn(tp->dev,
6504 "The system may be re-ordering memory-mapped I/O "
6505 "cycles to the network device, attempting to recover. "
6506 "Please report the problem to the driver maintainer "
6507 "and include system chipset information.\n");
6509 tg3_flag_set(tp, TX_RECOVERY_PENDING);
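/* Illustrative sketch, not driver code: the usual way to defeat posted
 * MMIO write reordering on the writer side is to read the mailbox back
 * right after writing it, which is essentially what the tw32_f()-style
 * write-then-flush helpers in this file do:
 *
 *	writel(val, mbox);
 *	readl(mbox);		// read back to flush the posted write
 */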
6512 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6514 /* Tell compiler to fetch tx indices from memory. */
6516 return tnapi->tx_pending -
6517 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
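/* Worked example, assuming the usual 512-entry TX ring: with
 * tx_prod == 5 and tx_cons == 510, (5 - 510) & 511 == 7 descriptors
 * are still in flight, so tg3_tx_avail() returns tx_pending - 7.
 */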
6520 /* Tigon3 never reports partial packet sends. So we do not
6521 * need special logic to handle SKBs that have not had all
6522 * of their frags sent yet, like SunGEM does.
6524 static void tg3_tx(struct tg3_napi *tnapi)
6526 struct tg3 *tp = tnapi->tp;
6527 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6528 u32 sw_idx = tnapi->tx_cons;
6529 struct netdev_queue *txq;
6530 int index = tnapi - tp->napi;
6531 unsigned int pkts_compl = 0, bytes_compl = 0;
6533 if (tg3_flag(tp, ENABLE_TSS))
6536 txq = netdev_get_tx_queue(tp->dev, index);
6538 while (sw_idx != hw_idx) {
6539 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6540 struct sk_buff *skb = ri->skb;
6543 if (unlikely(skb == NULL)) {
6548 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6549 struct skb_shared_hwtstamps timestamp;
6550 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6551 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6553 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6555 skb_tstamp_tx(skb, &timestamp);
6558 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6559 skb_headlen(skb), DMA_TO_DEVICE);
6563 while (ri->fragmented) {
6564 ri->fragmented = false;
6565 sw_idx = NEXT_TX(sw_idx);
6566 ri = &tnapi->tx_buffers[sw_idx];
6569 sw_idx = NEXT_TX(sw_idx);
6571 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6572 ri = &tnapi->tx_buffers[sw_idx];
6573 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6576 dma_unmap_page(&tp->pdev->dev,
6577 dma_unmap_addr(ri, mapping),
6578 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6581 while (ri->fragmented) {
6582 ri->fragmented = false;
6583 sw_idx = NEXT_TX(sw_idx);
6584 ri = &tnapi->tx_buffers[sw_idx];
6587 sw_idx = NEXT_TX(sw_idx);
6591 bytes_compl += skb->len;
6593 dev_consume_skb_any(skb);
6595 if (unlikely(tx_bug)) {
6601 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6603 tnapi->tx_cons = sw_idx;
6605 /* Need to make the tx_cons update visible to tg3_start_xmit()
6606 * before checking for netif_queue_stopped(). Without the
6607 * memory barrier, there is a small possibility that tg3_start_xmit()
6608 * will miss it and cause the queue to be stopped forever.
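 *
 * Sketch of the producer-side pairing this relies on (the stop/wake
 * protocol in tg3_start_xmit(); 'stop_threshold' is illustrative):
 *
 *	if (tg3_tx_avail(tnapi) <= stop_threshold) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *			netif_tx_wake_queue(txq);
 *	}
 */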
6612 if (unlikely(netif_tx_queue_stopped(txq) &&
6613 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6614 __netif_tx_lock(txq, smp_processor_id());
6615 if (netif_tx_queue_stopped(txq) &&
6616 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6617 netif_tx_wake_queue(txq);
6618 __netif_tx_unlock(txq);
6622 static void tg3_frag_free(bool is_frag, void *data)
6625 skb_free_frag(data);
6630 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6632 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6633 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6638 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6640 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6645 /* Returns size of skb allocated or < 0 on error.
6647 * We only need to fill in the address because the other members
6648 * of the RX descriptor are invariant, see tg3_init_rings.
6650 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6651 * posting buffers we only dirty the first cache line of the RX
6652 * descriptor (containing the address), whereas for the RX status
6653 * buffers the cpu only reads the last cacheline of the RX descriptor
6654 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
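 *
 * Worked example (illustrative sizes): a standard-ring buffer of about
 * 1.5 KB plus TG3_RX_OFFSET(tp), once both terms are rounded up by
 * SKB_DATA_ALIGN() and the skb_shared_info footer is added, still fits
 * in PAGE_SIZE on 4 KB-page systems, so the cheap napi_alloc_frag()
 * path is taken; jumbo buffers normally exceed it and fall back to
 * kmalloc().
 */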
6656 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6657 u32 opaque_key, u32 dest_idx_unmasked,
6658 unsigned int *frag_size)
6660 struct tg3_rx_buffer_desc *desc;
6661 struct ring_info *map;
6664 int skb_size, data_size, dest_idx;
6666 switch (opaque_key) {
6667 case RXD_OPAQUE_RING_STD:
6668 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6669 desc = &tpr->rx_std[dest_idx];
6670 map = &tpr->rx_std_buffers[dest_idx];
6671 data_size = tp->rx_pkt_map_sz;
6674 case RXD_OPAQUE_RING_JUMBO:
6675 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6676 desc = &tpr->rx_jmb[dest_idx].std;
6677 map = &tpr->rx_jmb_buffers[dest_idx];
6678 data_size = TG3_RX_JMB_MAP_SZ;
6685 /* Do not overwrite any of the map or rp information
6686 * until we are sure we can commit to a new buffer.
6688 * Callers depend upon this behavior and assume that
6689 * we leave everything unchanged if we fail.
6691 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6692 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6693 if (skb_size <= PAGE_SIZE) {
6694 data = napi_alloc_frag(skb_size);
6695 *frag_size = skb_size;
6697 data = kmalloc(skb_size, GFP_ATOMIC);
6703 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6704 data_size, DMA_FROM_DEVICE);
6705 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6706 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6711 dma_unmap_addr_set(map, mapping, mapping);
6713 desc->addr_hi = ((u64)mapping >> 32);
6714 desc->addr_lo = ((u64)mapping & 0xffffffff);
6719 /* We only need to copy the address over because the other
6720 * members of the RX descriptor are invariant. See notes above
6721 * tg3_alloc_rx_data for full details.
6723 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6724 struct tg3_rx_prodring_set *dpr,
6725 u32 opaque_key, int src_idx,
6726 u32 dest_idx_unmasked)
6728 struct tg3 *tp = tnapi->tp;
6729 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6730 struct ring_info *src_map, *dest_map;
6731 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6734 switch (opaque_key) {
6735 case RXD_OPAQUE_RING_STD:
6736 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6737 dest_desc = &dpr->rx_std[dest_idx];
6738 dest_map = &dpr->rx_std_buffers[dest_idx];
6739 src_desc = &spr->rx_std[src_idx];
6740 src_map = &spr->rx_std_buffers[src_idx];
6743 case RXD_OPAQUE_RING_JUMBO:
6744 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6745 dest_desc = &dpr->rx_jmb[dest_idx].std;
6746 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6747 src_desc = &spr->rx_jmb[src_idx].std;
6748 src_map = &spr->rx_jmb_buffers[src_idx];
6755 dest_map->data = src_map->data;
6756 dma_unmap_addr_set(dest_map, mapping,
6757 dma_unmap_addr(src_map, mapping));
6758 dest_desc->addr_hi = src_desc->addr_hi;
6759 dest_desc->addr_lo = src_desc->addr_lo;
6761 /* Ensure that the update to the skb happens after the physical
6762 * addresses have been transferred to the new BD location.
6766 src_map->data = NULL;
6769 /* The RX ring scheme is composed of multiple rings which post fresh
6770 * buffers to the chip, and one special ring the chip uses to report
6771 * status back to the host.
6773 * The special ring reports the status of received packets to the
6774 * host. The chip does not write into the original descriptor the
6775 * RX buffer was obtained from. The chip simply takes the original
6776 * descriptor as provided by the host, updates the status and length
6777 * field, then writes this into the next status ring entry.
6779 * Each ring the host uses to post buffers to the chip is described
6780 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6781 * it is first placed into the on-chip ram. When the packet's length
6782 * is known, it walks down the TG3_BDINFO entries to select the ring.
6783 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6784 * which is within the range of the new packet's length is chosen.
6786 * The "separate ring for rx status" scheme may sound queer, but it makes
6787 * sense from a cache coherency perspective. If only the host writes
6788 * to the buffer post rings, and only the chip writes to the rx status
6789 * rings, then cache lines never move beyond shared-modified state.
6790 * If both the host and chip were to write into the same ring, cache line
6791 * eviction could occur since both entities want it in an exclusive state.
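 *
 * Concrete walk-through (illustrative): a 300-byte frame arrives and is
 * staged in on-chip RAM; the chip walks the TG3_BDINFO entries, finds
 * the standard ring's MAXLEN covers 300 bytes, consumes one host-posted
 * buffer from that ring, and writes a completion into the next status
 * ring entry whose opaque cookie names the (ring, index) pair so
 * tg3_rx() below can locate the original buffer.
 */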
6793 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6795 struct tg3 *tp = tnapi->tp;
6796 u32 work_mask, rx_std_posted = 0;
6797 u32 std_prod_idx, jmb_prod_idx;
6798 u32 sw_idx = tnapi->rx_rcb_ptr;
6801 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6803 hw_idx = *(tnapi->rx_rcb_prod_idx);
6805 * We need to order the read of hw_idx and the read of
6806 * the opaque cookie.
6811 std_prod_idx = tpr->rx_std_prod_idx;
6812 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6813 while (sw_idx != hw_idx && budget > 0) {
6814 struct ring_info *ri;
6815 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6817 struct sk_buff *skb;
6818 dma_addr_t dma_addr;
6819 u32 opaque_key, desc_idx, *post_ptr;
6823 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6824 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6825 if (opaque_key == RXD_OPAQUE_RING_STD) {
6826 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6827 dma_addr = dma_unmap_addr(ri, mapping);
6829 post_ptr = &std_prod_idx;
6831 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6832 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6833 dma_addr = dma_unmap_addr(ri, mapping);
6835 post_ptr = &jmb_prod_idx;
6837 goto next_pkt_nopost;
6839 work_mask |= opaque_key;
6841 if (desc->err_vlan & RXD_ERR_MASK) {
6843 tg3_recycle_rx(tnapi, tpr, opaque_key,
6844 desc_idx, *post_ptr);
6846 /* Other statistics are tracked by the card. */
6851 prefetch(data + TG3_RX_OFFSET(tp));
6852 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6855 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6856 RXD_FLAG_PTPSTAT_PTPV1 ||
6857 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858 RXD_FLAG_PTPSTAT_PTPV2) {
6859 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6860 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6863 if (len > TG3_RX_COPY_THRESH(tp)) {
6865 unsigned int frag_size;
6867 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6868 *post_ptr, &frag_size);
6872 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6875 /* Ensure that the update to the data happens
6876 * after the usage of the old DMA mapping.
6882 skb = build_skb(data, frag_size);
6884 tg3_frag_free(frag_size != 0, data);
6885 goto drop_it_no_recycle;
6887 skb_reserve(skb, TG3_RX_OFFSET(tp));
6889 tg3_recycle_rx(tnapi, tpr, opaque_key,
6890 desc_idx, *post_ptr);
6892 skb = netdev_alloc_skb(tp->dev,
6893 len + TG3_RAW_IP_ALIGN);
6895 goto drop_it_no_recycle;
6897 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6898 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6901 data + TG3_RX_OFFSET(tp),
6903 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6904 len, DMA_FROM_DEVICE);
6909 tg3_hwclock_to_timestamp(tp, tstamp,
6910 skb_hwtstamps(skb));
6912 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6913 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6914 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6915 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6916 skb->ip_summed = CHECKSUM_UNNECESSARY;
6918 skb_checksum_none_assert(skb);
6920 skb->protocol = eth_type_trans(skb, tp->dev);
6922 if (len > (tp->dev->mtu + ETH_HLEN) &&
6923 skb->protocol != htons(ETH_P_8021Q) &&
6924 skb->protocol != htons(ETH_P_8021AD)) {
6925 dev_kfree_skb_any(skb);
6926 goto drop_it_no_recycle;
6929 if (desc->type_flags & RXD_FLAG_VLAN &&
6930 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6931 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6932 desc->err_vlan & RXD_VLAN_MASK);
6934 napi_gro_receive(&tnapi->napi, skb);
6942 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6943 tpr->rx_std_prod_idx = std_prod_idx &
6944 tp->rx_std_ring_mask;
6945 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6946 tpr->rx_std_prod_idx);
6947 work_mask &= ~RXD_OPAQUE_RING_STD;
6952 sw_idx &= tp->rx_ret_ring_mask;
6954 /* Refresh hw_idx to see if there is new work */
6955 if (sw_idx == hw_idx) {
6956 hw_idx = *(tnapi->rx_rcb_prod_idx);
6961 /* ACK the status ring. */
6962 tnapi->rx_rcb_ptr = sw_idx;
6963 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6965 /* Refill RX ring(s). */
6966 if (!tg3_flag(tp, ENABLE_RSS)) {
6967 /* Sync BD data before updating mailbox */
6970 if (work_mask & RXD_OPAQUE_RING_STD) {
6971 tpr->rx_std_prod_idx = std_prod_idx &
6972 tp->rx_std_ring_mask;
6973 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6974 tpr->rx_std_prod_idx);
6976 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6977 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6978 tp->rx_jmb_ring_mask;
6979 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6980 tpr->rx_jmb_prod_idx);
6982 } else if (work_mask) {
6983 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6984 * updated before the producer indices can be updated.
6988 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6989 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6991 if (tnapi != &tp->napi[1]) {
6992 tp->rx_refill = true;
6993 napi_schedule(&tp->napi[1].napi);
7000 static void tg3_poll_link(struct tg3 *tp)
7002 /* handle link change and other phy events */
7003 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7004 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7006 if (sblk->status & SD_STATUS_LINK_CHG) {
7007 sblk->status = SD_STATUS_UPDATED |
7008 (sblk->status & ~SD_STATUS_LINK_CHG);
7009 spin_lock(&tp->lock);
7010 if (tg3_flag(tp, USE_PHYLIB)) {
7012 (MAC_STATUS_SYNC_CHANGED |
7013 MAC_STATUS_CFG_CHANGED |
7014 MAC_STATUS_MI_COMPLETION |
7015 MAC_STATUS_LNKSTATE_CHANGED));
7018 tg3_setup_phy(tp, false);
7019 spin_unlock(&tp->lock);
7024 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7025 struct tg3_rx_prodring_set *dpr,
7026 struct tg3_rx_prodring_set *spr)
7028 u32 si, di, cpycnt, src_prod_idx;
7032 src_prod_idx = spr->rx_std_prod_idx;
7034 /* Make sure updates to the rx_std_buffers[] entries and the
7035 * standard producer index are seen in the correct order.
7039 if (spr->rx_std_cons_idx == src_prod_idx)
7042 if (spr->rx_std_cons_idx < src_prod_idx)
7043 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7045 cpycnt = tp->rx_std_ring_mask + 1 -
7046 spr->rx_std_cons_idx;
7048 cpycnt = min(cpycnt,
7049 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7051 si = spr->rx_std_cons_idx;
7052 di = dpr->rx_std_prod_idx;
7054 for (i = di; i < di + cpycnt; i++) {
7055 if (dpr->rx_std_buffers[i].data) {
7065 /* Ensure that updates to the rx_std_buffers ring and the
7066 * shadowed hardware producer ring from tg3_recycle_skb() are
7067 * ordered correctly WRT the skb check above.
7071 memcpy(&dpr->rx_std_buffers[di],
7072 &spr->rx_std_buffers[si],
7073 cpycnt * sizeof(struct ring_info));
7075 for (i = 0; i < cpycnt; i++, di++, si++) {
7076 struct tg3_rx_buffer_desc *sbd, *dbd;
7077 sbd = &spr->rx_std[si];
7078 dbd = &dpr->rx_std[di];
7079 dbd->addr_hi = sbd->addr_hi;
7080 dbd->addr_lo = sbd->addr_lo;
7083 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7084 tp->rx_std_ring_mask;
7085 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7086 tp->rx_std_ring_mask;
7090 src_prod_idx = spr->rx_jmb_prod_idx;
7092 /* Make sure updates to the rx_jmb_buffers[] entries and
7093 * the jumbo producer index are seen in the correct order.
7097 if (spr->rx_jmb_cons_idx == src_prod_idx)
7100 if (spr->rx_jmb_cons_idx < src_prod_idx)
7101 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7103 cpycnt = tp->rx_jmb_ring_mask + 1 -
7104 spr->rx_jmb_cons_idx;
7106 cpycnt = min(cpycnt,
7107 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7109 si = spr->rx_jmb_cons_idx;
7110 di = dpr->rx_jmb_prod_idx;
7112 for (i = di; i < di + cpycnt; i++) {
7113 if (dpr->rx_jmb_buffers[i].data) {
7123 /* Ensure that updates to the rx_jmb_buffers ring and the
7124 * shadowed hardware producer ring from tg3_recycle_skb() are
7125 * ordered correctly WRT the skb check above.
7129 memcpy(&dpr->rx_jmb_buffers[di],
7130 &spr->rx_jmb_buffers[si],
7131 cpycnt * sizeof(struct ring_info));
7133 for (i = 0; i < cpycnt; i++, di++, si++) {
7134 struct tg3_rx_buffer_desc *sbd, *dbd;
7135 sbd = &spr->rx_jmb[si].std;
7136 dbd = &dpr->rx_jmb[di].std;
7137 dbd->addr_hi = sbd->addr_hi;
7138 dbd->addr_lo = sbd->addr_lo;
7141 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7142 tp->rx_jmb_ring_mask;
7143 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7144 tp->rx_jmb_ring_mask;
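/* Illustrative sketch (hypothetical helper, not used by the driver): the
 * producer/consumer arithmetic in tg3_rx_prodring_xfer() above relies on
 * ring sizes being powers of two, so advancing an index is just a masked
 * add.  Example with a hypothetical 512-entry ring:
 */
static inline u32 tg3_ring_advance_example(void)
{
	u32 mask = 511;			/* 512-entry ring */
	u32 cons = 510, cpycnt = 5;

	return (cons + cpycnt) & mask;	/* 515 & 511 wraps around to 3 */
}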
7150 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7152 struct tg3 *tp = tnapi->tp;
7154 /* run TX completion thread */
7155 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7157 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7161 if (!tnapi->rx_rcb_prod_idx)
7164 /* run RX thread, within the bounds set by NAPI.
7165 * All RX "locking" is done by ensuring outside
7166 * code synchronizes with tg3->napi.poll()
7168 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7169 work_done += tg3_rx(tnapi, budget - work_done);
7171 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7172 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7174 u32 std_prod_idx = dpr->rx_std_prod_idx;
7175 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7177 tp->rx_refill = false;
7178 for (i = 1; i <= tp->rxq_cnt; i++)
7179 err |= tg3_rx_prodring_xfer(tp, dpr,
7180 &tp->napi[i].prodring);
7184 if (std_prod_idx != dpr->rx_std_prod_idx)
7185 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7186 dpr->rx_std_prod_idx);
7188 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7189 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7190 dpr->rx_jmb_prod_idx);
7193 tw32_f(HOSTCC_MODE, tp->coal_now);
7199 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7201 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7202 schedule_work(&tp->reset_task);
7205 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7207 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7208 cancel_work_sync(&tp->reset_task);
7209 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
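/* Sketch of the schedule-once idiom used above (hypothetical flag bit and
 * work item, not driver state): test_and_set_bit() is atomic, so concurrent
 * callers agree on exactly one winner and the work item is queued at most
 * once until the bit is cleared again.
 */
static inline void tg3_schedule_once_example(unsigned long *flags,
					     struct work_struct *work)
{
	if (!test_and_set_bit(0, flags))	/* bit 0: hypothetical "pending" */
		schedule_work(work);
}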
7212 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7214 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7215 struct tg3 *tp = tnapi->tp;
7217 struct tg3_hw_status *sblk = tnapi->hw_status;
7220 work_done = tg3_poll_work(tnapi, work_done, budget);
7222 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7225 if (unlikely(work_done >= budget))
7228 /* tp->last_tag is used in tg3_int_reenable() below
7229 * to tell the hw how much work has been processed,
7230 * so we must read it before checking for more work.
7232 tnapi->last_tag = sblk->status_tag;
7233 tnapi->last_irq_tag = tnapi->last_tag;
7236 /* check for RX/TX work to do */
7237 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7238 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
/* This test here is not race-free, but it will reduce
 * the number of interrupts by looping again.
7243 if (tnapi == &tp->napi[1] && tp->rx_refill)
7246 napi_complete_done(napi, work_done);
7247 /* Reenable interrupts. */
7248 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7250 /* This test here is synchronized by napi_schedule()
7251 * and napi_complete() to close the race condition.
7253 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7254 tw32(HOSTCC_MODE, tp->coalesce_mode |
7255 HOSTCC_MODE_ENABLE |
7262 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7266 /* work_done is guaranteed to be less than budget. */
7267 napi_complete(napi);
7268 tg3_reset_task_schedule(tp);
7272 static void tg3_process_error(struct tg3 *tp)
7275 bool real_error = false;
7277 if (tg3_flag(tp, ERROR_PROCESSED))
7280 /* Check Flow Attention register */
7281 val = tr32(HOSTCC_FLOW_ATTN);
7282 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7283 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7287 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7288 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7292 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7293 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7302 tg3_flag_set(tp, ERROR_PROCESSED);
7303 tg3_reset_task_schedule(tp);
7306 static int tg3_poll(struct napi_struct *napi, int budget)
7308 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7309 struct tg3 *tp = tnapi->tp;
7311 struct tg3_hw_status *sblk = tnapi->hw_status;
7314 if (sblk->status & SD_STATUS_ERROR)
7315 tg3_process_error(tp);
7319 work_done = tg3_poll_work(tnapi, work_done, budget);
7321 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7324 if (unlikely(work_done >= budget))
7327 if (tg3_flag(tp, TAGGED_STATUS)) {
7328 /* tp->last_tag is used in tg3_int_reenable() below
7329 * to tell the hw how much work has been processed,
7330 * so we must read it before checking for more work.
7332 tnapi->last_tag = sblk->status_tag;
7333 tnapi->last_irq_tag = tnapi->last_tag;
7336 sblk->status &= ~SD_STATUS_UPDATED;
7338 if (likely(!tg3_has_work(tnapi))) {
7339 napi_complete_done(napi, work_done);
7340 tg3_int_reenable(tnapi);
7345 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7349 /* work_done is guaranteed to be less than budget. */
7350 napi_complete(napi);
7351 tg3_reset_task_schedule(tp);
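/* Minimal sketch of the NAPI contract that both poll handlers above follow
 * (illustrative only, hypothetical helper): consume at most "budget"
 * packets; only when less than the budget was used may the handler complete
 * NAPI, after which the caller re-enables device interrupts.
 */
static inline int tg3_napi_contract_example(struct napi_struct *napi,
					    int work_done, int budget)
{
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	/* else: stay scheduled; the core will call ->poll() again */
	return work_done;
}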
7355 static void tg3_napi_disable(struct tg3 *tp)
7359 for (i = tp->irq_cnt - 1; i >= 0; i--)
7360 napi_disable(&tp->napi[i].napi);
7363 static void tg3_napi_enable(struct tg3 *tp)
7367 for (i = 0; i < tp->irq_cnt; i++)
7368 napi_enable(&tp->napi[i].napi);
7371 static void tg3_napi_init(struct tg3 *tp)
7375 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7376 for (i = 1; i < tp->irq_cnt; i++)
7377 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7380 static void tg3_napi_fini(struct tg3 *tp)
7384 for (i = 0; i < tp->irq_cnt; i++)
7385 netif_napi_del(&tp->napi[i].napi);
7388 static inline void tg3_netif_stop(struct tg3 *tp)
7390 netif_trans_update(tp->dev); /* prevent tx timeout */
7391 tg3_napi_disable(tp);
7392 netif_carrier_off(tp->dev);
7393 netif_tx_disable(tp->dev);
7396 /* tp->lock must be held */
7397 static inline void tg3_netif_start(struct tg3 *tp)
7401 /* NOTE: unconditional netif_tx_wake_all_queues is only
7402 * appropriate so long as all callers are assured to
7403 * have free tx slots (such as after tg3_init_hw)
7405 netif_tx_wake_all_queues(tp->dev);
7408 netif_carrier_on(tp->dev);
7410 tg3_napi_enable(tp);
7411 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7412 tg3_enable_ints(tp);
7415 static void tg3_irq_quiesce(struct tg3 *tp)
7416 __releases(tp->lock)
7417 __acquires(tp->lock)
7421 BUG_ON(tp->irq_sync);
7426 spin_unlock_bh(&tp->lock);
7428 for (i = 0; i < tp->irq_cnt; i++)
7429 synchronize_irq(tp->napi[i].irq_vec);
7431 spin_lock_bh(&tp->lock);
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with it as well.  Most of the time this is not necessary, except when
 * shutting down the device.
7439 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7441 spin_lock_bh(&tp->lock);
7443 tg3_irq_quiesce(tp);
7446 static inline void tg3_full_unlock(struct tg3 *tp)
7448 spin_unlock_bh(&tp->lock);
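/* Usage sketch (hypothetical caller, not part of the driver): passing a
 * non-zero irq_sync also waits for in-flight interrupt handlers to finish
 * before the hardware is touched.
 */
static inline void tg3_full_lock_usage_example(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);
	/* ... reconfigure hardware with IRQ handlers quiesced ... */
	tg3_full_unlock(tp);
}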
/* One-shot MSI handler - the chip automatically disables the interrupt
 * after sending the MSI, so the driver doesn't have to do it.
7454 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7456 struct tg3_napi *tnapi = dev_id;
7457 struct tg3 *tp = tnapi->tp;
7459 prefetch(tnapi->hw_status);
7461 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7463 if (likely(!tg3_irq_sync(tp)))
7464 napi_schedule(&tnapi->napi);
7469 /* MSI ISR - No need to check for interrupt sharing and no need to
7470 * flush status block and interrupt mailbox. PCI ordering rules
7471 * guarantee that MSI will arrive after the status block.
7473 static irqreturn_t tg3_msi(int irq, void *dev_id)
7475 struct tg3_napi *tnapi = dev_id;
7476 struct tg3 *tp = tnapi->tp;
7478 prefetch(tnapi->hw_status);
7480 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
* Writing any value to intr-mbox-0 clears PCI INTA# and
* chip-internal interrupt pending events.
* Writing non-zero to intr-mbox-0 additionally tells the
* NIC to stop sending us irqs, engaging "in-intr-handler"
7488 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7489 if (likely(!tg3_irq_sync(tp)))
7490 napi_schedule(&tnapi->napi);
7492 return IRQ_RETVAL(1);
7495 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7497 struct tg3_napi *tnapi = dev_id;
7498 struct tg3 *tp = tnapi->tp;
7499 struct tg3_hw_status *sblk = tnapi->hw_status;
7500 unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
 * the CPU before the status block that was posted prior to the
 * interrupt.  Reading the PCI State register will confirm whether
 * the interrupt is ours and will flush the status block.
7507 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7508 if (tg3_flag(tp, CHIP_RESETTING) ||
7509 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
* Writing any value to intr-mbox-0 clears PCI INTA# and
* chip-internal interrupt pending events.
* Writing non-zero to intr-mbox-0 additionally tells the
* NIC to stop sending us irqs, engaging "in-intr-handler"
7522 * Flush the mailbox to de-assert the IRQ immediately to prevent
7523 * spurious interrupts. The flush impacts performance but
7524 * excessive spurious interrupts can be worse in some cases.
7526 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7527 if (tg3_irq_sync(tp))
7529 sblk->status &= ~SD_STATUS_UPDATED;
7530 if (likely(tg3_has_work(tnapi))) {
7531 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7532 napi_schedule(&tnapi->napi);
/* No work, shared interrupt perhaps?  Re-enable
 * interrupts, and flush that PCI write
7537 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7541 return IRQ_RETVAL(handled);
7544 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7546 struct tg3_napi *tnapi = dev_id;
7547 struct tg3 *tp = tnapi->tp;
7548 struct tg3_hw_status *sblk = tnapi->hw_status;
7549 unsigned int handled = 1;
/* In INTx mode, it is possible for the interrupt to arrive at
 * the CPU before the status block that was posted prior to the
 * interrupt.  Reading the PCI State register will confirm whether
 * the interrupt is ours and will flush the status block.
7556 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7557 if (tg3_flag(tp, CHIP_RESETTING) ||
7558 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
* Writing any value to intr-mbox-0 clears PCI INTA# and
* chip-internal interrupt pending events.
* Writing non-zero to intr-mbox-0 additionally tells the
* NIC to stop sending us irqs, engaging "in-intr-handler"
7571 * Flush the mailbox to de-assert the IRQ immediately to prevent
7572 * spurious interrupts. The flush impacts performance but
7573 * excessive spurious interrupts can be worse in some cases.
7575 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7578 * In a shared interrupt configuration, sometimes other devices'
7579 * interrupts will scream. We record the current status tag here
7580 * so that the above check can report that the screaming interrupts
7581 * are unhandled. Eventually they will be silenced.
7583 tnapi->last_irq_tag = sblk->status_tag;
7585 if (tg3_irq_sync(tp))
7588 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7590 napi_schedule(&tnapi->napi);
7593 return IRQ_RETVAL(handled);
7596 /* ISR for interrupt test */
7597 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7599 struct tg3_napi *tnapi = dev_id;
7600 struct tg3 *tp = tnapi->tp;
7601 struct tg3_hw_status *sblk = tnapi->hw_status;
7603 if ((sblk->status & SD_STATUS_UPDATED) ||
7604 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7605 tg3_disable_ints(tp);
7606 return IRQ_RETVAL(1);
7608 return IRQ_RETVAL(0);
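/* Note on the return idiom used by the handlers above: IRQ_RETVAL(x) maps
 * non-zero x to IRQ_HANDLED and zero to IRQ_NONE.  Hypothetical sketch:
 */
static inline irqreturn_t tg3_irq_retval_example(bool ours)
{
	return IRQ_RETVAL(ours);	/* IRQ_HANDLED iff the IRQ was ours */
}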
7611 #ifdef CONFIG_NET_POLL_CONTROLLER
7612 static void tg3_poll_controller(struct net_device *dev)
7615 struct tg3 *tp = netdev_priv(dev);
7617 if (tg3_irq_sync(tp))
7620 for (i = 0; i < tp->irq_cnt; i++)
7621 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7625 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7627 struct tg3 *tp = netdev_priv(dev);
7629 if (netif_msg_tx_err(tp)) {
7630 netdev_err(dev, "transmit timed out, resetting\n");
7634 tg3_reset_task_schedule(tp);
7637 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7638 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7640 u32 base = (u32) mapping & 0xffffffff;
7642 return base + len + 8 < base;
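/* Worked example (hypothetical helper): the test above detects a buffer
 * that, together with the 8 bytes of slack the driver allows for, crosses
 * a 4GB boundary, by checking for 32-bit wraparound of "base + len + 8".
 */
static inline int tg3_4g_overflow_example(void)
{
	u32 base = 0xfffffff0;	/* 16 bytes below a 4GB boundary */

	/* 0xfffffff0 + 16 + 8 wraps to 0x8, which is < base: overflow */
	return base + 16 + 8 < base;
}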
7645 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7646 * of any 4GB boundaries: 4G, 8G, etc
7648 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7651 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7652 u32 base = (u32) mapping & 0xffffffff;
7654 return ((base + len + (mss & 0x3fff)) < base);
7659 /* Test for DMA addresses > 40-bit */
7660 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7663 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7664 if (tg3_flag(tp, 40BIT_DMA_BUG))
7665 return ((u64) mapping + len) > DMA_BIT_MASK(40);
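/* Illustrative numbers for the 40-bit test above (hypothetical helper):
 * DMA_BIT_MASK(40) is (1ULL << 40) - 1, so the first failing address is
 * the one just beyond the 40-bit space.
 */
static inline int tg3_40bit_limit_example(void)
{
	u64 mapping = 1ULL << 40;	/* first address beyond 40 bits */

	return (mapping + 4) > DMA_BIT_MASK(40);	/* true */
}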
7672 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7673 dma_addr_t mapping, u32 len, u32 flags,
7676 txbd->addr_hi = ((u64) mapping >> 32);
7677 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7678 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7679 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
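/* Usage sketch (hypothetical values, stack-local descriptor): how a final,
 * VLAN-tagged 1514-byte frame with TSO disabled would be packed by
 * tg3_tx_set_bd() above.
 */
static inline void tg3_tx_set_bd_example(dma_addr_t mapping)
{
	struct tg3_tx_buffer_desc txbd;

	tg3_tx_set_bd(&txbd, mapping, 1514, TXD_FLAG_END,
		      0 /* mss: no TSO */, 100 /* VLAN tag */);
}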
7682 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7683 dma_addr_t map, u32 len, u32 flags,
7686 struct tg3 *tp = tnapi->tp;
7689 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7692 if (tg3_4g_overflow_test(map, len))
7695 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7698 if (tg3_40bit_overflow_test(tp, map, len))
7701 if (tp->dma_limit) {
7702 u32 prvidx = *entry;
7703 u32 tmp_flag = flags & ~TXD_FLAG_END;
7704 while (len > tp->dma_limit && *budget) {
7705 u32 frag_len = tp->dma_limit;
7706 len -= tp->dma_limit;
/* Avoid the 8-byte DMA problem */
7710 len += tp->dma_limit / 2;
7711 frag_len = tp->dma_limit / 2;
7714 tnapi->tx_buffers[*entry].fragmented = true;
7716 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7717 frag_len, tmp_flag, mss, vlan);
7720 *entry = NEXT_TX(*entry);
7727 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7728 len, flags, mss, vlan);
7730 *entry = NEXT_TX(*entry);
7733 tnapi->tx_buffers[prvidx].fragmented = false;
7737 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7738 len, flags, mss, vlan);
7739 *entry = NEXT_TX(*entry);
7745 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7748 struct sk_buff *skb;
7749 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7754 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7755 skb_headlen(skb), DMA_TO_DEVICE);
7757 while (txb->fragmented) {
7758 txb->fragmented = false;
7759 entry = NEXT_TX(entry);
7760 txb = &tnapi->tx_buffers[entry];
7763 for (i = 0; i <= last; i++) {
7764 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7766 entry = NEXT_TX(entry);
7767 txb = &tnapi->tx_buffers[entry];
7769 dma_unmap_page(&tnapi->tp->pdev->dev,
7770 dma_unmap_addr(txb, mapping),
7771 skb_frag_size(frag), DMA_TO_DEVICE);
7773 while (txb->fragmented) {
7774 txb->fragmented = false;
7775 entry = NEXT_TX(entry);
7776 txb = &tnapi->tx_buffers[entry];
7781 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7782 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7783 struct sk_buff **pskb,
7784 u32 *entry, u32 *budget,
7785 u32 base_flags, u32 mss, u32 vlan)
7787 struct tg3 *tp = tnapi->tp;
7788 struct sk_buff *new_skb, *skb = *pskb;
7789 dma_addr_t new_addr = 0;
7792 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7793 new_skb = skb_copy(skb, GFP_ATOMIC);
7795 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7797 new_skb = skb_copy_expand(skb,
7798 skb_headroom(skb) + more_headroom,
7799 skb_tailroom(skb), GFP_ATOMIC);
7805 /* New SKB is guaranteed to be linear. */
7806 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7807 new_skb->len, DMA_TO_DEVICE);
7808 /* Make sure the mapping succeeded */
7809 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7810 dev_kfree_skb_any(new_skb);
7813 u32 save_entry = *entry;
7815 base_flags |= TXD_FLAG_END;
7817 tnapi->tx_buffers[*entry].skb = new_skb;
7818 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7821 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7822 new_skb->len, base_flags,
7824 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7825 dev_kfree_skb_any(new_skb);
7831 dev_consume_skb_any(skb);
7836 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
/* Check if we will never have enough descriptors,
 * as gso_segs can exceed the current ring size.
7841 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7844 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7846 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7847 * indicated in tg3_tx_frag_set()
7849 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7850 struct netdev_queue *txq, struct sk_buff *skb)
7852 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7853 struct sk_buff *segs, *seg, *next;
7855 /* Estimate the number of fragments in the worst case */
7856 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7857 netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
 * the tx index in tg3_tx_avail() below, because in
 * tg3_tx(), we update the tx index before checking for
 * netif_tx_queue_stopped().
7865 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7866 return NETDEV_TX_BUSY;
7868 netif_tx_wake_queue(txq);
7871 segs = skb_gso_segment(skb, tp->dev->features &
7872 ~(NETIF_F_TSO | NETIF_F_TSO6));
7873 if (IS_ERR(segs) || !segs)
7874 goto tg3_tso_bug_end;
7876 skb_list_walk_safe(segs, seg, next) {
7877 skb_mark_not_on_list(seg);
7878 tg3_start_xmit(seg, tp->dev);
7882 dev_consume_skb_any(skb);
7884 return NETDEV_TX_OK;
7887 /* hard_start_xmit for all devices */
7888 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7890 struct tg3 *tp = netdev_priv(dev);
7891 u32 len, entry, base_flags, mss, vlan = 0;
7893 int i = -1, would_hit_hwbug;
7895 struct tg3_napi *tnapi;
7896 struct netdev_queue *txq;
7898 struct iphdr *iph = NULL;
7899 struct tcphdr *tcph = NULL;
7900 __sum16 tcp_csum = 0, ip_csum = 0;
7901 __be16 ip_tot_len = 0;
7903 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7904 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7905 if (tg3_flag(tp, ENABLE_TSS))
7908 budget = tg3_tx_avail(tnapi);
7910 /* We are running in BH disabled context with netif_tx_lock
7911 * and TX reclaim runs via tp->napi.poll inside of a software
7912 * interrupt. Furthermore, IRQ processing runs lockless so we have
7913 * no IRQ context deadlocks to worry about either. Rejoice!
7915 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7916 if (!netif_tx_queue_stopped(txq)) {
7917 netif_tx_stop_queue(txq);
7919 /* This is a hard error, log it. */
7921 "BUG! Tx Ring full when queue awake!\n");
7923 return NETDEV_TX_BUSY;
7926 entry = tnapi->tx_prod;
7929 mss = skb_shinfo(skb)->gso_size;
7931 u32 tcp_opt_len, hdr_len;
7933 if (skb_cow_head(skb, 0))
7937 tcp_opt_len = tcp_optlen(skb);
7939 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
/* HW/FW cannot correctly segment packets that have been
 * VLAN encapsulated.
7944 if (skb->protocol == htons(ETH_P_8021Q) ||
7945 skb->protocol == htons(ETH_P_8021AD)) {
7946 if (tg3_tso_bug_gso_check(tnapi, skb))
7947 return tg3_tso_bug(tp, tnapi, txq, skb);
7951 if (!skb_is_gso_v6(skb)) {
7952 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7953 tg3_flag(tp, TSO_BUG)) {
7954 if (tg3_tso_bug_gso_check(tnapi, skb))
7955 return tg3_tso_bug(tp, tnapi, txq, skb);
7958 ip_csum = iph->check;
7959 ip_tot_len = iph->tot_len;
7961 iph->tot_len = htons(mss + hdr_len);
7964 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7965 TXD_FLAG_CPU_POST_DMA);
7967 tcph = tcp_hdr(skb);
7968 tcp_csum = tcph->check;
7970 if (tg3_flag(tp, HW_TSO_1) ||
7971 tg3_flag(tp, HW_TSO_2) ||
7972 tg3_flag(tp, HW_TSO_3)) {
7974 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7976 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7980 if (tg3_flag(tp, HW_TSO_3)) {
7981 mss |= (hdr_len & 0xc) << 12;
7983 base_flags |= 0x00000010;
7984 base_flags |= (hdr_len & 0x3e0) << 5;
7985 } else if (tg3_flag(tp, HW_TSO_2))
7986 mss |= hdr_len << 9;
7987 else if (tg3_flag(tp, HW_TSO_1) ||
7988 tg3_asic_rev(tp) == ASIC_REV_5705) {
7989 if (tcp_opt_len || iph->ihl > 5) {
7992 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7993 mss |= (tsflags << 11);
7996 if (tcp_opt_len || iph->ihl > 5) {
7999 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8000 base_flags |= tsflags << 12;
8003 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* HW/FW cannot correctly checksum packets that have been
 * VLAN encapsulated.
8007 if (skb->protocol == htons(ETH_P_8021Q) ||
8008 skb->protocol == htons(ETH_P_8021AD)) {
8009 if (skb_checksum_help(skb))
8012 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8016 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8017 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8018 base_flags |= TXD_FLAG_JMB_PKT;
8020 if (skb_vlan_tag_present(skb)) {
8021 base_flags |= TXD_FLAG_VLAN;
8022 vlan = skb_vlan_tag_get(skb);
8025 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8026 tg3_flag(tp, TX_TSTAMP_EN)) {
8027 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8028 base_flags |= TXD_FLAG_HWTSTAMP;
8031 len = skb_headlen(skb);
8033 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8035 if (dma_mapping_error(&tp->pdev->dev, mapping))
8039 tnapi->tx_buffers[entry].skb = skb;
8040 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8042 would_hit_hwbug = 0;
8044 if (tg3_flag(tp, 5701_DMA_BUG))
8045 would_hit_hwbug = 1;
8047 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8048 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8050 would_hit_hwbug = 1;
8051 } else if (skb_shinfo(skb)->nr_frags > 0) {
8054 if (!tg3_flag(tp, HW_TSO_1) &&
8055 !tg3_flag(tp, HW_TSO_2) &&
8056 !tg3_flag(tp, HW_TSO_3))
8059 /* Now loop through additional data
8060 * fragments, and queue them.
8062 last = skb_shinfo(skb)->nr_frags - 1;
8063 for (i = 0; i <= last; i++) {
8064 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8066 len = skb_frag_size(frag);
8067 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8068 len, DMA_TO_DEVICE);
8070 tnapi->tx_buffers[entry].skb = NULL;
8071 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8073 if (dma_mapping_error(&tp->pdev->dev, mapping))
8077 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8079 ((i == last) ? TXD_FLAG_END : 0),
8081 would_hit_hwbug = 1;
8087 if (would_hit_hwbug) {
8088 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8090 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8091 /* If it's a TSO packet, do GSO instead of
8092 * allocating and copying to a large linear SKB
8095 iph->check = ip_csum;
8096 iph->tot_len = ip_tot_len;
8098 tcph->check = tcp_csum;
8099 return tg3_tso_bug(tp, tnapi, txq, skb);
8102 /* If the workaround fails due to memory/mapping
8103 * failure, silently drop this packet.
8105 entry = tnapi->tx_prod;
8106 budget = tg3_tx_avail(tnapi);
8107 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8108 base_flags, mss, vlan))
8112 skb_tx_timestamp(skb);
8113 netdev_tx_sent_queue(txq, skb->len);
8115 /* Sync BD data before updating mailbox */
8118 tnapi->tx_prod = entry;
8119 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8120 netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
 * the tx index in tg3_tx_avail() below, because in
 * tg3_tx(), we update the tx index before checking for
 * netif_tx_queue_stopped().
8128 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8129 netif_tx_wake_queue(txq);
8132 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8133 /* Packets are ready, update Tx producer idx on card. */
8134 tw32_tx_mbox(tnapi->prodmbox, entry);
8137 return NETDEV_TX_OK;
8140 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8141 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8143 dev_kfree_skb_any(skb);
8146 return NETDEV_TX_OK;
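/* Condensed sketch of the flow-control idiom used twice above (hypothetical
 * helper): stop the queue first, insert a full barrier, then re-check
 * availability.  The barrier pairs with the completion path, which updates
 * the consumer index before testing netif_tx_queue_stopped().
 */
static inline void tg3_stop_wake_example(struct tg3_napi *tnapi,
					 struct netdev_queue *txq)
{
	if (tg3_tx_avail(tnapi) <= MAX_SKB_FRAGS + 1) {
		netif_tx_stop_queue(txq);
		smp_mb();	/* order the stop vs. the re-check */
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}
}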
8149 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8152 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8153 MAC_MODE_PORT_MODE_MASK);
8155 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8157 if (!tg3_flag(tp, 5705_PLUS))
8158 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8160 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8161 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8163 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8165 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8167 if (tg3_flag(tp, 5705_PLUS) ||
8168 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8169 tg3_asic_rev(tp) == ASIC_REV_5700)
8170 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8173 tw32(MAC_MODE, tp->mac_mode);
8177 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8179 u32 val, bmcr, mac_mode, ptest = 0;
8181 tg3_phy_toggle_apd(tp, false);
8182 tg3_phy_toggle_automdix(tp, false);
8184 if (extlpbk && tg3_phy_set_extloopbk(tp))
8187 bmcr = BMCR_FULLDPLX;
8192 bmcr |= BMCR_SPEED100;
8196 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8198 bmcr |= BMCR_SPEED100;
8201 bmcr |= BMCR_SPEED1000;
8206 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8207 tg3_readphy(tp, MII_CTRL1000, &val);
8208 val |= CTL1000_AS_MASTER |
8209 CTL1000_ENABLE_MASTER;
8210 tg3_writephy(tp, MII_CTRL1000, val);
8212 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8213 MII_TG3_FET_PTEST_TRIM_2;
8214 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8217 bmcr |= BMCR_LOOPBACK;
8219 tg3_writephy(tp, MII_BMCR, bmcr);
8221 /* The write needs to be flushed for the FETs */
8222 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8223 tg3_readphy(tp, MII_BMCR, &bmcr);
8227 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8228 tg3_asic_rev(tp) == ASIC_REV_5785) {
8229 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8230 MII_TG3_FET_PTEST_FRC_TX_LINK |
8231 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8233 /* The write needs to be flushed for the AC131 */
8234 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8237 /* Reset to prevent losing 1st rx packet intermittently */
8238 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8239 tg3_flag(tp, 5780_CLASS)) {
8240 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8242 tw32_f(MAC_RX_MODE, tp->rx_mode);
8245 mac_mode = tp->mac_mode &
8246 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8247 if (speed == SPEED_1000)
8248 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8250 mac_mode |= MAC_MODE_PORT_MODE_MII;
8252 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8253 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8255 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8256 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8257 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8258 mac_mode |= MAC_MODE_LINK_POLARITY;
8260 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8261 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8264 tw32(MAC_MODE, mac_mode);
8270 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8272 struct tg3 *tp = netdev_priv(dev);
8274 if (features & NETIF_F_LOOPBACK) {
8275 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8278 spin_lock_bh(&tp->lock);
8279 tg3_mac_loopback(tp, true);
8280 netif_carrier_on(tp->dev);
8281 spin_unlock_bh(&tp->lock);
8282 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8284 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8287 spin_lock_bh(&tp->lock);
8288 tg3_mac_loopback(tp, false);
8289 /* Force link status check */
8290 tg3_setup_phy(tp, true);
8291 spin_unlock_bh(&tp->lock);
8292 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8296 static netdev_features_t tg3_fix_features(struct net_device *dev,
8297 netdev_features_t features)
8299 struct tg3 *tp = netdev_priv(dev);
8301 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8302 features &= ~NETIF_F_ALL_TSO;
8307 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8309 netdev_features_t changed = dev->features ^ features;
8311 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8312 tg3_set_loopback(dev, features);
8317 static void tg3_rx_prodring_free(struct tg3 *tp,
8318 struct tg3_rx_prodring_set *tpr)
8322 if (tpr != &tp->napi[0].prodring) {
8323 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8324 i = (i + 1) & tp->rx_std_ring_mask)
8325 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8328 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8329 for (i = tpr->rx_jmb_cons_idx;
8330 i != tpr->rx_jmb_prod_idx;
8331 i = (i + 1) & tp->rx_jmb_ring_mask) {
8332 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8340 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8341 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8344 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8345 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8346 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8351 /* Initialize rx rings for packet processing.
* The chip has been shut down and the driver detached from
* the networking core, so no interrupts or new tx packets will
* end up in the driver.  tp->{tx,}lock are held and thus
8358 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8359 struct tg3_rx_prodring_set *tpr)
8361 u32 i, rx_pkt_dma_sz;
8363 tpr->rx_std_cons_idx = 0;
8364 tpr->rx_std_prod_idx = 0;
8365 tpr->rx_jmb_cons_idx = 0;
8366 tpr->rx_jmb_prod_idx = 0;
8368 if (tpr != &tp->napi[0].prodring) {
8369 memset(&tpr->rx_std_buffers[0], 0,
8370 TG3_RX_STD_BUFF_RING_SIZE(tp));
8371 if (tpr->rx_jmb_buffers)
8372 memset(&tpr->rx_jmb_buffers[0], 0,
8373 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8377 /* Zero out all descriptors. */
8378 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8380 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8381 if (tg3_flag(tp, 5780_CLASS) &&
8382 tp->dev->mtu > ETH_DATA_LEN)
8383 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8384 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
/* Initialize invariants of the rings; we only set this
 * stuff once.  This works because the card does not
 * write into the rx buffer posting rings.
8390 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8391 struct tg3_rx_buffer_desc *rxd;
8393 rxd = &tpr->rx_std[i];
8394 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8395 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8396 rxd->opaque = (RXD_OPAQUE_RING_STD |
8397 (i << RXD_OPAQUE_INDEX_SHIFT));
8400 /* Now allocate fresh SKBs for each rx ring. */
8401 for (i = 0; i < tp->rx_pending; i++) {
8402 unsigned int frag_size;
8404 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8406 netdev_warn(tp->dev,
8407 "Using a smaller RX standard ring. Only "
8408 "%d out of %d buffers were allocated "
8409 "successfully\n", i, tp->rx_pending);
8417 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8420 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8422 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8425 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8426 struct tg3_rx_buffer_desc *rxd;
8428 rxd = &tpr->rx_jmb[i].std;
8429 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8430 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8432 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8433 (i << RXD_OPAQUE_INDEX_SHIFT));
8436 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8437 unsigned int frag_size;
8439 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8441 netdev_warn(tp->dev,
8442 "Using a smaller RX jumbo ring. Only %d "
8443 "out of %d buffers were allocated "
8444 "successfully\n", i, tp->rx_jumbo_pending);
8447 tp->rx_jumbo_pending = i;
8456 tg3_rx_prodring_free(tp, tpr);
8460 static void tg3_rx_prodring_fini(struct tg3 *tp,
8461 struct tg3_rx_prodring_set *tpr)
8463 kfree(tpr->rx_std_buffers);
8464 tpr->rx_std_buffers = NULL;
8465 kfree(tpr->rx_jmb_buffers);
8466 tpr->rx_jmb_buffers = NULL;
8468 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8469 tpr->rx_std, tpr->rx_std_mapping);
8473 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8474 tpr->rx_jmb, tpr->rx_jmb_mapping);
8479 static int tg3_rx_prodring_init(struct tg3 *tp,
8480 struct tg3_rx_prodring_set *tpr)
8482 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8484 if (!tpr->rx_std_buffers)
8487 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8488 TG3_RX_STD_RING_BYTES(tp),
8489 &tpr->rx_std_mapping,
8494 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8495 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8497 if (!tpr->rx_jmb_buffers)
8500 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8501 TG3_RX_JMB_RING_BYTES(tp),
8502 &tpr->rx_jmb_mapping,
8511 tg3_rx_prodring_fini(tp, tpr);
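/* Usage sketch (hypothetical helper): the init/fini pair above is
 * symmetric, and fini is safe to call on a partially initialized set
 * because every freed pointer is reset to NULL.
 */
static inline int tg3_prodring_lifecycle_example(struct tg3 *tp,
						 struct tg3_rx_prodring_set *tpr)
{
	int err = tg3_rx_prodring_init(tp, tpr);

	if (!err)
		tg3_rx_prodring_fini(tp, tpr);	/* safe: pointers are NULLed */
	return err;
}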
8515 /* Free up pending packets in all rx/tx rings.
* The chip has been shut down and the driver detached from
* the networking core, so no interrupts or new tx packets will
* end up in the driver.  tp->{tx,}lock is not held and we are not
* in an interrupt context and thus may sleep.
8522 static void tg3_free_rings(struct tg3 *tp)
8526 for (j = 0; j < tp->irq_cnt; j++) {
8527 struct tg3_napi *tnapi = &tp->napi[j];
8529 tg3_rx_prodring_free(tp, &tnapi->prodring);
8531 if (!tnapi->tx_buffers)
8534 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8535 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8540 tg3_tx_skb_unmap(tnapi, i,
8541 skb_shinfo(skb)->nr_frags - 1);
8543 dev_consume_skb_any(skb);
8545 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8549 /* Initialize tx/rx rings for packet processing.
* The chip has been shut down and the driver detached from
* the networking core, so no interrupts or new tx packets will
* end up in the driver.  tp->{tx,}lock are held and thus
8556 static int tg3_init_rings(struct tg3 *tp)
8560 /* Free up all the SKBs. */
8563 for (i = 0; i < tp->irq_cnt; i++) {
8564 struct tg3_napi *tnapi = &tp->napi[i];
8566 tnapi->last_tag = 0;
8567 tnapi->last_irq_tag = 0;
8568 tnapi->hw_status->status = 0;
8569 tnapi->hw_status->status_tag = 0;
8570 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8575 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8577 tnapi->rx_rcb_ptr = 0;
8579 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8581 if (tnapi->prodring.rx_std &&
8582 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8591 static void tg3_mem_tx_release(struct tg3 *tp)
8595 for (i = 0; i < tp->irq_max; i++) {
8596 struct tg3_napi *tnapi = &tp->napi[i];
8598 if (tnapi->tx_ring) {
8599 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8600 tnapi->tx_ring, tnapi->tx_desc_mapping);
8601 tnapi->tx_ring = NULL;
8604 kfree(tnapi->tx_buffers);
8605 tnapi->tx_buffers = NULL;
8609 static int tg3_mem_tx_acquire(struct tg3 *tp)
8612 struct tg3_napi *tnapi = &tp->napi[0];
8614 /* If multivector TSS is enabled, vector 0 does not handle
8615 * tx interrupts. Don't allocate any resources for it.
8617 if (tg3_flag(tp, ENABLE_TSS))
8620 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8621 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8622 sizeof(struct tg3_tx_ring_info),
8624 if (!tnapi->tx_buffers)
8627 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8629 &tnapi->tx_desc_mapping,
8631 if (!tnapi->tx_ring)
8638 tg3_mem_tx_release(tp);
8642 static void tg3_mem_rx_release(struct tg3 *tp)
8646 for (i = 0; i < tp->irq_max; i++) {
8647 struct tg3_napi *tnapi = &tp->napi[i];
8649 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8654 dma_free_coherent(&tp->pdev->dev,
8655 TG3_RX_RCB_RING_BYTES(tp),
8657 tnapi->rx_rcb_mapping);
8658 tnapi->rx_rcb = NULL;
8662 static int tg3_mem_rx_acquire(struct tg3 *tp)
8664 unsigned int i, limit;
8666 limit = tp->rxq_cnt;
8668 /* If RSS is enabled, we need a (dummy) producer ring
8669 * set on vector zero. This is the true hw prodring.
8671 if (tg3_flag(tp, ENABLE_RSS))
8674 for (i = 0; i < limit; i++) {
8675 struct tg3_napi *tnapi = &tp->napi[i];
8677 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8680 /* If multivector RSS is enabled, vector 0
8681 * does not handle rx or tx interrupts.
8682 * Don't allocate any resources for it.
8684 if (!i && tg3_flag(tp, ENABLE_RSS))
8687 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8688 TG3_RX_RCB_RING_BYTES(tp),
8689 &tnapi->rx_rcb_mapping,
8698 tg3_mem_rx_release(tp);
* Must not be invoked with interrupt sources disabled and
* the hardware shut down.
8706 static void tg3_free_consistent(struct tg3 *tp)
8710 for (i = 0; i < tp->irq_cnt; i++) {
8711 struct tg3_napi *tnapi = &tp->napi[i];
8713 if (tnapi->hw_status) {
8714 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8716 tnapi->status_mapping);
8717 tnapi->hw_status = NULL;
8721 tg3_mem_rx_release(tp);
8722 tg3_mem_tx_release(tp);
8724 /* tp->hw_stats can be referenced safely:
8725 * 1. under rtnl_lock
8726 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8729 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8730 tp->hw_stats, tp->stats_mapping);
8731 tp->hw_stats = NULL;
* Must not be invoked with interrupt sources disabled and
* the hardware shut down.  Can sleep.
8739 static int tg3_alloc_consistent(struct tg3 *tp)
8743 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8744 sizeof(struct tg3_hw_stats),
8745 &tp->stats_mapping, GFP_KERNEL);
8749 for (i = 0; i < tp->irq_cnt; i++) {
8750 struct tg3_napi *tnapi = &tp->napi[i];
8751 struct tg3_hw_status *sblk;
8753 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8755 &tnapi->status_mapping,
8757 if (!tnapi->hw_status)
8760 sblk = tnapi->hw_status;
8762 if (tg3_flag(tp, ENABLE_RSS)) {
8763 u16 *prodptr = NULL;
8766 * When RSS is enabled, the status block format changes
8767 * slightly. The "rx_jumbo_consumer", "reserved",
8768 * and "rx_mini_consumer" members get mapped to the
8769 * other three rx return ring producer indexes.
8773 prodptr = &sblk->idx[0].rx_producer;
8776 prodptr = &sblk->rx_jumbo_consumer;
8779 prodptr = &sblk->reserved;
8782 prodptr = &sblk->rx_mini_consumer;
8785 tnapi->rx_rcb_prod_idx = prodptr;
8787 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8791 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8797 tg3_free_consistent(tp);
8801 #define MAX_WAIT_CNT 1000
/* To stop a block, clear the enable bit and poll until it
 * clears.  tp->lock is held.
8806 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8811 if (tg3_flag(tp, 5705_PLUS)) {
8818 /* We can't enable/disable these bits of the
8819 * 5705/5750, just say success.
8832 for (i = 0; i < MAX_WAIT_CNT; i++) {
8833 if (pci_channel_offline(tp->pdev)) {
8834 dev_err(&tp->pdev->dev,
8835 "tg3_stop_block device offline, "
8836 "ofs=%lx enable_bit=%x\n",
8843 if ((val & enable_bit) == 0)
8847 if (i == MAX_WAIT_CNT && !silent) {
8848 dev_err(&tp->pdev->dev,
8849 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8857 /* tp->lock is held. */
8858 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8862 tg3_disable_ints(tp);
8864 if (pci_channel_offline(tp->pdev)) {
8865 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8866 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8871 tp->rx_mode &= ~RX_MODE_ENABLE;
8872 tw32_f(MAC_RX_MODE, tp->rx_mode);
8875 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8876 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8877 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8878 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8879 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8880 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8882 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8883 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8884 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8885 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8886 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8890 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8891 tw32_f(MAC_MODE, tp->mac_mode);
8894 tp->tx_mode &= ~TX_MODE_ENABLE;
8895 tw32_f(MAC_TX_MODE, tp->tx_mode);
8897 for (i = 0; i < MAX_WAIT_CNT; i++) {
8899 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8902 if (i >= MAX_WAIT_CNT) {
8903 dev_err(&tp->pdev->dev,
8904 "%s timed out, TX_MODE_ENABLE will not clear "
8905 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8909 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8910 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8911 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8913 tw32(FTQ_RESET, 0xffffffff);
8914 tw32(FTQ_RESET, 0x00000000);
8916 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8917 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8920 for (i = 0; i < tp->irq_cnt; i++) {
8921 struct tg3_napi *tnapi = &tp->napi[i];
8922 if (tnapi->hw_status)
8923 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8929 /* Save PCI command register before chip reset */
8930 static void tg3_save_pci_state(struct tg3 *tp)
8932 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8935 /* Restore PCI state after chip reset */
8936 static void tg3_restore_pci_state(struct tg3 *tp)
8940 /* Re-enable indirect register accesses. */
8941 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8942 tp->misc_host_ctrl);
8944 /* Set MAX PCI retry to zero. */
8945 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8946 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8947 tg3_flag(tp, PCIX_MODE))
8948 val |= PCISTATE_RETRY_SAME_DMA;
8949 /* Allow reads and writes to the APE register and memory space. */
8950 if (tg3_flag(tp, ENABLE_APE))
8951 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8952 PCISTATE_ALLOW_APE_SHMEM_WR |
8953 PCISTATE_ALLOW_APE_PSPACE_WR;
8954 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8956 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8958 if (!tg3_flag(tp, PCI_EXPRESS)) {
8959 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8960 tp->pci_cacheline_sz);
8961 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8965 /* Make sure PCI-X relaxed ordering bit is clear. */
8966 if (tg3_flag(tp, PCIX_MODE)) {
8969 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8971 pcix_cmd &= ~PCI_X_CMD_ERO;
8972 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8976 if (tg3_flag(tp, 5780_CLASS)) {
/* Chip reset on 5780 will reset the MSI enable bit,
 * so we need to restore it.
8981 if (tg3_flag(tp, USING_MSI)) {
8984 pci_read_config_word(tp->pdev,
8985 tp->msi_cap + PCI_MSI_FLAGS,
8987 pci_write_config_word(tp->pdev,
8988 tp->msi_cap + PCI_MSI_FLAGS,
8989 ctrl | PCI_MSI_FLAGS_ENABLE);
8990 val = tr32(MSGINT_MODE);
8991 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8996 static void tg3_override_clk(struct tg3 *tp)
9000 switch (tg3_asic_rev(tp)) {
9002 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9003 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9004 TG3_CPMU_MAC_ORIDE_ENABLE);
9009 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9017 static void tg3_restore_clk(struct tg3 *tp)
9021 switch (tg3_asic_rev(tp)) {
9023 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9024 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9025 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9030 val = tr32(TG3_CPMU_CLCK_ORIDE);
9031 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9039 /* tp->lock is held. */
9040 static int tg3_chip_reset(struct tg3 *tp)
9041 __releases(tp->lock)
9042 __acquires(tp->lock)
9045 void (*write_op)(struct tg3 *, u32, u32);
9048 if (!pci_device_is_present(tp->pdev))
9053 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
/* No matching tg3_nvram_unlock() after this because
 * the chip reset below will undo the nvram lock.
9058 tp->nvram_lock_cnt = 0;
9060 /* GRC_MISC_CFG core clock reset will clear the memory
9061 * enable bit in PCI register 4 and the MSI enable bit
9062 * on some chips, so we save relevant registers here.
9064 tg3_save_pci_state(tp);
9066 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9067 tg3_flag(tp, 5755_PLUS))
9068 tw32(GRC_FASTBOOT_PC, 0);
9071 * We must avoid the readl() that normally takes place.
9072 * It locks machines, causes machine checks, and other
9073 * fun things. So, temporarily disable the 5701
9074 * hardware workaround, while we do the reset.
9076 write_op = tp->write32;
9077 if (write_op == tg3_write_flush_reg32)
9078 tp->write32 = tg3_write32;
/* Prevent the irq handler from reading or writing PCI registers
 * during chip reset when the memory enable bit in the PCI command
 * register may be cleared.  The chip does not generate interrupts
 * at this time, but the irq handler may still be called due to irq
 * sharing or irqpoll.
9086 tg3_flag_set(tp, CHIP_RESETTING);
9087 for (i = 0; i < tp->irq_cnt; i++) {
9088 struct tg3_napi *tnapi = &tp->napi[i];
9089 if (tnapi->hw_status) {
9090 tnapi->hw_status->status = 0;
9091 tnapi->hw_status->status_tag = 0;
9093 tnapi->last_tag = 0;
9094 tnapi->last_irq_tag = 0;
9098 tg3_full_unlock(tp);
9100 for (i = 0; i < tp->irq_cnt; i++)
9101 synchronize_irq(tp->napi[i].irq_vec);
9103 tg3_full_lock(tp, 0);
9105 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9106 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9107 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9111 val = GRC_MISC_CFG_CORECLK_RESET;
9113 if (tg3_flag(tp, PCI_EXPRESS)) {
9114 /* Force PCIe 1.0a mode */
9115 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9116 !tg3_flag(tp, 57765_PLUS) &&
9117 tr32(TG3_PCIE_PHY_TSTCTL) ==
9118 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9119 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9121 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9122 tw32(GRC_MISC_CFG, (1 << 29));
9127 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9128 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9129 tw32(GRC_VCPU_EXT_CTRL,
9130 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
/* Set the clock to the highest frequency to avoid timeouts.  With
 * link-aware mode, the clock speed could be slow and the bootcode
 * does not complete within the expected time.  Override the clock
 * to allow the bootcode to finish sooner, then restore it.
9138 tg3_override_clk(tp);
/* Manage gphy power for all CPMU-absent PCIe devices. */
9141 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9142 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9144 tw32(GRC_MISC_CFG, val);
9146 /* restore 5701 hardware bug workaround write method */
9147 tp->write32 = write_op;
/* Unfortunately, we have to delay before the PCI read back.
 * Some 575X chips will not even respond to a PCI cfg access
 * when the reset command is given to the chip.
9153 * How do these hardware designers expect things to work
9154 * properly if the PCI write is posted for a long period
9155 * of time? It is always necessary to have some method by
9156 * which a register read back can occur to push the write
9157 * out which does the reset.
9159 * For most tg3 variants the trick below was working.
/* Flush PCI posted writes.  The normal MMIO registers
 * are inaccessible at this time, so this is the only
 * way to do this reliably (actually, this is no longer
 * the case, see above).  I tried to use indirect
 * register read/write but this upset some 5701 variants.
9170 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9174 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9177 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9181 /* Wait for link training to complete. */
9182 for (j = 0; j < 5000; j++)
9185 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9186 pci_write_config_dword(tp->pdev, 0xc4,
9187 cfg_val | (1 << 15));
9190 /* Clear the "no snoop" and "relaxed ordering" bits. */
9191 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9193 * Older PCIe devices only support the 128 byte
9194 * MPS setting. Enforce the restriction.
9196 if (!tg3_flag(tp, CPMU_PRESENT))
9197 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9198 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9200 /* Clear error status */
9201 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9202 PCI_EXP_DEVSTA_CED |
9203 PCI_EXP_DEVSTA_NFED |
9204 PCI_EXP_DEVSTA_FED |
9205 PCI_EXP_DEVSTA_URD);
9208 tg3_restore_pci_state(tp);
9210 tg3_flag_clear(tp, CHIP_RESETTING);
9211 tg3_flag_clear(tp, ERROR_PROCESSED);
9214 if (tg3_flag(tp, 5780_CLASS))
9215 val = tr32(MEMARB_MODE);
9216 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9218 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9220 tw32(0x5000, 0x400);
9223 if (tg3_flag(tp, IS_SSB_CORE)) {
* BCM4785: In order to avoid repercussions from using a
* potentially defective internal ROM, stop the Rx RISC CPU,
* which is not required on this device.
9230 tg3_halt_cpu(tp, RX_CPU_BASE);
9233 err = tg3_poll_fw(tp);
9237 tw32(GRC_MODE, tp->grc_mode);
9239 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9242 tw32(0xc4, val | (1 << 15));
9245 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9246 tg3_asic_rev(tp) == ASIC_REV_5705) {
9247 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9248 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9249 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9250 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9253 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9254 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9256 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9257 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9262 tw32_f(MAC_MODE, val);
9265 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9269 if (tg3_flag(tp, PCI_EXPRESS) &&
9270 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9271 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9272 !tg3_flag(tp, 57765_PLUS)) {
9275 tw32(0x7c00, val | (1 << 25));
9278 tg3_restore_clk(tp);
9280 /* Increase the core clock speed to fix tx timeout issue for 5762
9281 * with 100Mbps link speed.
9283 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9284 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9285 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9286 TG3_CPMU_MAC_ORIDE_ENABLE);
9289 /* Reprobe ASF enable state. */
9290 tg3_flag_clear(tp, ENABLE_ASF);
9291 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9292 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9294 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9295 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9296 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9299 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9300 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9301 tg3_flag_set(tp, ENABLE_ASF);
9302 tp->last_event_jiffies = jiffies;
9303 if (tg3_flag(tp, 5750_PLUS))
9304 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9306 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9307 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9308 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9309 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9310 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9317 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9318 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9319 static void __tg3_set_rx_mode(struct net_device *);
9321 /* tp->lock is held. */
9322 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9328 tg3_write_sig_pre_reset(tp, kind);
9330 tg3_abort_hw(tp, silent);
9331 err = tg3_chip_reset(tp);
9333 __tg3_set_mac_addr(tp, false);
9335 tg3_write_sig_legacy(tp, kind);
9336 tg3_write_sig_post_reset(tp, kind);
9339 /* Save the stats across chip resets... */
9340 tg3_get_nstats(tp, &tp->net_stats_prev);
9341 tg3_get_estats(tp, &tp->estats_prev);
9343 /* And make sure the next sample is new data */
9344 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9350 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9352 struct tg3 *tp = netdev_priv(dev);
9353 struct sockaddr *addr = p;
9355 bool skip_mac_1 = false;
9357 if (!is_valid_ether_addr(addr->sa_data))
9358 return -EADDRNOTAVAIL;
9360 eth_hw_addr_set(dev, addr->sa_data);
9362 if (!netif_running(dev))
9365 if (tg3_flag(tp, ENABLE_ASF)) {
9366 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9368 addr0_high = tr32(MAC_ADDR_0_HIGH);
9369 addr0_low = tr32(MAC_ADDR_0_LOW);
9370 addr1_high = tr32(MAC_ADDR_1_HIGH);
9371 addr1_low = tr32(MAC_ADDR_1_LOW);
9373 /* Skip MAC addr 1 if ASF is using it. */
9374 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9375 !(addr1_high == 0 && addr1_low == 0))
9378 spin_lock_bh(&tp->lock);
9379 __tg3_set_mac_addr(tp, skip_mac_1);
9380 __tg3_set_rx_mode(dev);
9381 spin_unlock_bh(&tp->lock);
9386 /* tp->lock is held. */
9387 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9388 dma_addr_t mapping, u32 maxlen_flags,
9392 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9393 ((u64) mapping >> 32));
9395 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9396 ((u64) mapping & 0xffffffff));
9398 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9401 if (!tg3_flag(tp, 5705_PLUS))
9403 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
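/* Illustrative sketch (hypothetical helper): every 64-bit DMA address
 * programmed by tg3_set_bdinfo() above is split into two 32-bit register
 * writes, high word first.
 */
static inline void tg3_dma_addr_split_example(u64 mapping, u32 *hi, u32 *lo)
{
	*hi = (u32)(mapping >> 32);
	*lo = (u32)(mapping & 0xffffffff);
}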
9408 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9412 if (!tg3_flag(tp, ENABLE_TSS)) {
9413 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9414 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9415 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9417 tw32(HOSTCC_TXCOL_TICKS, 0);
9418 tw32(HOSTCC_TXMAX_FRAMES, 0);
9419 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9421 for (; i < tp->txq_cnt; i++) {
9424 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9425 tw32(reg, ec->tx_coalesce_usecs);
9426 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9427 tw32(reg, ec->tx_max_coalesced_frames);
9428 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9429 tw32(reg, ec->tx_max_coalesced_frames_irq);
9433 for (; i < tp->irq_max - 1; i++) {
9434 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9435 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9436 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9440 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9443 u32 limit = tp->rxq_cnt;
9445 if (!tg3_flag(tp, ENABLE_RSS)) {
9446 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9447 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9448 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9451 tw32(HOSTCC_RXCOL_TICKS, 0);
9452 tw32(HOSTCC_RXMAX_FRAMES, 0);
9453 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9456 for (; i < limit; i++) {
9459 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9460 tw32(reg, ec->rx_coalesce_usecs);
9461 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9462 tw32(reg, ec->rx_max_coalesced_frames);
9463 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9464 tw32(reg, ec->rx_max_coalesced_frames_irq);
9467 for (; i < tp->irq_max - 1; i++) {
9468 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9469 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9470 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9474 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9476 tg3_coal_tx_init(tp, ec);
9477 tg3_coal_rx_init(tp, ec);
9479 if (!tg3_flag(tp, 5705_PLUS)) {
9480 u32 val = ec->stats_block_coalesce_usecs;
9482 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9483 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9488 tw32(HOSTCC_STAT_COAL_TICKS, val);
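/* Usage sketch (hypothetical values, caller holds tp->lock): program
 * moderate interrupt coalescing via the helper above; any ethtool_coalesce
 * fields left unset stay zero, which disables that parameter.
 */
static inline void tg3_coalesce_usage_example(struct tg3 *tp)
{
	struct ethtool_coalesce ec = {
		.rx_coalesce_usecs = 20,
		.rx_max_coalesced_frames = 5,
		.tx_coalesce_usecs = 72,
		.tx_max_coalesced_frames = 53,
	};

	__tg3_set_coalesce(tp, &ec);
}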
9492 /* tp->lock is held. */
9493 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9497 /* Disable all transmit rings but the first. */
9498 if (!tg3_flag(tp, 5705_PLUS))
9499 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9500 else if (tg3_flag(tp, 5717_PLUS))
9501 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9502 else if (tg3_flag(tp, 57765_CLASS) ||
9503 tg3_asic_rev(tp) == ASIC_REV_5762)
9504 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9506 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9508 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9509 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9510 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9511 BDINFO_FLAGS_DISABLED);
9514 /* tp->lock is held. */
9515 static void tg3_tx_rcbs_init(struct tg3 *tp)
9518 u32 txrcb = NIC_SRAM_SEND_RCB;
9520 if (tg3_flag(tp, ENABLE_TSS))
9523 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9524 struct tg3_napi *tnapi = &tp->napi[i];
9526 if (!tnapi->tx_ring)
9529 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9530 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9531 NIC_SRAM_TX_BUFFER_DESC);
9535 /* tp->lock is held. */
9536 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9540 /* Disable all receive return rings but the first. */
9541 if (tg3_flag(tp, 5717_PLUS))
9542 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9543 else if (!tg3_flag(tp, 5705_PLUS))
9544 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9545 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9546 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9547 tg3_flag(tp, 57765_CLASS))
9548 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9550 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9552 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9553 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9554 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9555 BDINFO_FLAGS_DISABLED);
9558 /* tp->lock is held. */
9559 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9562 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9564 if (tg3_flag(tp, ENABLE_RSS))
9565 i++, rxrcb += TG3_BDINFO_SIZE;
9567 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9568 struct tg3_napi *tnapi = &tp->napi[i];
9570 if (!tnapi->rx_rcb)
9571 continue;
9573 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9574 (tp->rx_ret_ring_mask + 1) <<
9575 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9579 /* tp->lock is held. */
9580 static void tg3_rings_reset(struct tg3 *tp)
9584 struct tg3_napi *tnapi = &tp->napi[0];
9586 tg3_tx_rcbs_disable(tp);
9588 tg3_rx_ret_rcbs_disable(tp);
9590 /* Disable interrupts */
9591 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9592 tp->napi[0].chk_msi_cnt = 0;
9593 tp->napi[0].last_rx_cons = 0;
9594 tp->napi[0].last_tx_cons = 0;
9596 /* Zero mailbox registers. */
9597 if (tg3_flag(tp, SUPPORT_MSIX)) {
9598 for (i = 1; i < tp->irq_max; i++) {
9599 tp->napi[i].tx_prod = 0;
9600 tp->napi[i].tx_cons = 0;
9601 if (tg3_flag(tp, ENABLE_TSS))
9602 tw32_mailbox(tp->napi[i].prodmbox, 0);
9603 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9604 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9605 tp->napi[i].chk_msi_cnt = 0;
9606 tp->napi[i].last_rx_cons = 0;
9607 tp->napi[i].last_tx_cons = 0;
9608 }
9609 if (!tg3_flag(tp, ENABLE_TSS))
9610 tw32_mailbox(tp->napi[0].prodmbox, 0);
9611 } else {
9612 tp->napi[0].tx_prod = 0;
9613 tp->napi[0].tx_cons = 0;
9614 tw32_mailbox(tp->napi[0].prodmbox, 0);
9615 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9618 /* Make sure the NIC-based send BD rings are disabled. */
9619 if (!tg3_flag(tp, 5705_PLUS)) {
9620 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9621 for (i = 0; i < 16; i++)
9622 tw32_tx_mbox(mbox + i * 8, 0);
9625 /* Clear status block in ram. */
9626 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9628 /* Set status block DMA address */
9629 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9630 ((u64) tnapi->status_mapping >> 32));
9631 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9632 ((u64) tnapi->status_mapping & 0xffffffff));
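/* Note: a 64-bit DMA address is always split across two 32-bit registers
 * like this. For example, mapping 0x0000000123456000 writes 0x1 to the
 * HIGH register and 0x23456000 to the LOW register.
 */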
9634 stblk = HOSTCC_STATBLCK_RING1;
9636 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9637 u64 mapping = (u64)tnapi->status_mapping;
9638 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9639 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9640 stblk += 8;
9642 /* Clear status block in ram. */
9643 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9644 }
9646 tg3_tx_rcbs_init(tp);
9647 tg3_rx_ret_rcbs_init(tp);
9650 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9652 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9654 if (!tg3_flag(tp, 5750_PLUS) ||
9655 tg3_flag(tp, 5780_CLASS) ||
9656 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9657 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9658 tg3_flag(tp, 57765_PLUS))
9659 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9660 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9661 tg3_asic_rev(tp) == ASIC_REV_5787)
9662 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9664 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9666 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9667 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9669 val = min(nic_rep_thresh, host_rep_thresh);
9670 tw32(RCVBDI_STD_THRESH, val);
9672 if (tg3_flag(tp, 57765_PLUS))
9673 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9675 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9678 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9680 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9682 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9683 tw32(RCVBDI_JUMBO_THRESH, val);
9685 if (tg3_flag(tp, 57765_PLUS))
9686 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9689 static inline u32 calc_crc(unsigned char *buf, int len)
9690 {
9691 u32 reg;
9692 u32 tmp;
9693 int j, k;
9695 reg = 0xffffffff;
9697 for (j = 0; j < len; j++) {
9698 reg ^= buf[j];
9700 for (k = 0; k < 8; k++) {
9701 tmp = reg & 0x01;
9703 reg >>= 1;
9705 if (tmp)
9706 reg ^= CRC32_POLY_LE;
9707 }
9708 }
9710 return ~reg;
9711 }
9713 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9714 {
9715 /* accept or reject all multicast frames */
9716 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9717 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9718 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9719 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9722 static void __tg3_set_rx_mode(struct net_device *dev)
9724 struct tg3 *tp = netdev_priv(dev);
9727 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9728 RX_MODE_KEEP_VLAN_TAG);
9730 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9731 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9732 * flag clear.
9733 */
9734 if (!tg3_flag(tp, ENABLE_ASF))
9735 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9738 if (dev->flags & IFF_PROMISC) {
9739 /* Promiscuous mode. */
9740 rx_mode |= RX_MODE_PROMISC;
9741 } else if (dev->flags & IFF_ALLMULTI) {
9742 /* Accept all multicast. */
9743 tg3_set_multi(tp, 1);
9744 } else if (netdev_mc_empty(dev)) {
9745 /* Reject all multicast. */
9746 tg3_set_multi(tp, 0);
9748 /* Accept one or more multicast(s). */
9749 struct netdev_hw_addr *ha;
9750 u32 mc_filter[4] = { 0, };
9751 u32 regidx;
9752 u32 bit;
9753 u32 crc;
9755 netdev_for_each_mc_addr(ha, dev) {
9756 crc = calc_crc(ha->addr, ETH_ALEN);
9757 bit = ~crc & 0x7f;
9758 regidx = (bit & 0x60) >> 5;
9759 bit &= 0x1f;
9760 mc_filter[regidx] |= (1 << bit);
9761 }
9763 tw32(MAC_HASH_REG_0, mc_filter[0]);
9764 tw32(MAC_HASH_REG_1, mc_filter[1]);
9765 tw32(MAC_HASH_REG_2, mc_filter[2]);
9766 tw32(MAC_HASH_REG_3, mc_filter[3]);
9767 }
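/* Worked example (illustrative): if calc_crc() on a multicast address
 * returns a CRC whose low byte is 0x34, then bit = ~0x34 & 0x7f = 0x4b,
 * regidx = (0x4b & 0x60) >> 5 = 2, and bit &= 0x1f leaves 11 -- so bit 11
 * of MAC_HASH_REG_2 is set. The MAC hashes incoming multicast frames the
 * same way and accepts a frame when its hash bit is set.
 */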
9769 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9770 rx_mode |= RX_MODE_PROMISC;
9771 } else if (!(dev->flags & IFF_PROMISC)) {
9772 /* Add all entries to the MAC address filter list */
9773 int i = 0;
9774 struct netdev_hw_addr *ha;
9776 netdev_for_each_uc_addr(ha, dev) {
9777 __tg3_set_one_mac_addr(tp, ha->addr,
9778 i + TG3_UCAST_ADDR_IDX(tp));
9779 i++;
9780 }
9781 }
9783 if (rx_mode != tp->rx_mode) {
9784 tp->rx_mode = rx_mode;
9785 tw32_f(MAC_RX_MODE, rx_mode);
9790 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9794 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9795 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9796 }
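/* Example: with qcnt = 4, ethtool_rxfh_indir_default(i, 4) is simply
 * i % 4, so the 128-entry table cycles 0,1,2,3,0,1,... and spreads RSS
 * flows evenly across the four rx rings.
 */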
9798 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9802 if (!tg3_flag(tp, SUPPORT_MSIX))
9805 if (tp->rxq_cnt == 1) {
9806 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9807 return;
9808 }
9810 /* Validate table against current IRQ count */
9811 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9812 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9816 if (i != TG3_RSS_INDIR_TBL_SIZE)
9817 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9820 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9821 {
9822 int i = 0;
9823 u32 reg = MAC_RSS_INDIR_TBL_0;
9825 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9826 u32 val = tp->rss_ind_tbl[i];
9827 i++;
9828 for (; i % 8; i++) {
9829 val <<= 4;
9830 val |= tp->rss_ind_tbl[i];
9831 }
9832 tw32(reg, val);
9833 reg += 4;
9834 }
9835 }
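/* Layout sketch: each 32-bit MAC_RSS_INDIR_TBL register packs eight
 * 4-bit ring indices, most-significant nibble first, so the 128-entry
 * table occupies 16 consecutive registers. For example, table entries
 * 0,1,2,3,0,1,2,3 are written as the single value 0x01230123.
 */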
9837 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9839 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9840 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9842 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9845 /* tp->lock is held. */
9846 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9848 u32 val, rdmac_mode;
9850 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9852 tg3_disable_ints(tp);
9856 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9858 if (tg3_flag(tp, INIT_COMPLETE))
9859 tg3_abort_hw(tp, 1);
9861 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9862 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9863 tg3_phy_pull_config(tp);
9864 tg3_eee_pull_config(tp, NULL);
9865 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9868 /* Enable MAC control of LPI */
9869 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9875 err = tg3_chip_reset(tp);
9879 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9881 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9882 val = tr32(TG3_CPMU_CTRL);
9883 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9884 tw32(TG3_CPMU_CTRL, val);
9886 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9887 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9888 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9889 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9891 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9892 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9893 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9894 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9896 val = tr32(TG3_CPMU_HST_ACC);
9897 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9898 val |= CPMU_HST_ACC_MACCLK_6_25;
9899 tw32(TG3_CPMU_HST_ACC, val);
9902 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9903 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9904 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9905 PCIE_PWR_MGMT_L1_THRESH_4MS;
9906 tw32(PCIE_PWR_MGMT_THRESH, val);
9908 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9909 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9911 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9913 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9914 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9917 if (tg3_flag(tp, L1PLLPD_EN)) {
9918 u32 grc_mode = tr32(GRC_MODE);
9920 /* Access the lower 1K of PL PCIE block registers. */
9921 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9922 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9924 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9925 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9926 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9928 tw32(GRC_MODE, grc_mode);
9931 if (tg3_flag(tp, 57765_CLASS)) {
9932 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9933 u32 grc_mode = tr32(GRC_MODE);
9935 /* Access the lower 1K of PL PCIE block registers. */
9936 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9937 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9939 val = tr32(TG3_PCIE_TLDLPL_PORT +
9940 TG3_PCIE_PL_LO_PHYCTL5);
9941 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9942 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9944 tw32(GRC_MODE, grc_mode);
9947 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9950 /* Fix transmit hangs */
9951 val = tr32(TG3_CPMU_PADRNG_CTL);
9952 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9953 tw32(TG3_CPMU_PADRNG_CTL, val);
9955 grc_mode = tr32(GRC_MODE);
9957 /* Access the lower 1K of DL PCIE block registers. */
9958 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9959 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9961 val = tr32(TG3_PCIE_TLDLPL_PORT +
9962 TG3_PCIE_DL_LO_FTSMAX);
9963 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9964 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9965 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9967 tw32(GRC_MODE, grc_mode);
9970 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9971 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9972 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9973 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9976 /* This works around an issue with Athlon chipsets on
9977 * B3 tigon3 silicon. This bit has no effect on any
9978 * other revision. But do not set this on PCI Express
9979 * chips and don't even touch the clocks if the CPMU is present.
9981 if (!tg3_flag(tp, CPMU_PRESENT)) {
9982 if (!tg3_flag(tp, PCI_EXPRESS))
9983 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9984 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9987 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9988 tg3_flag(tp, PCIX_MODE)) {
9989 val = tr32(TG3PCI_PCISTATE);
9990 val |= PCISTATE_RETRY_SAME_DMA;
9991 tw32(TG3PCI_PCISTATE, val);
9994 if (tg3_flag(tp, ENABLE_APE)) {
9995 /* Allow reads and writes to the
9996 * APE register and memory space.
9997 */
9998 val = tr32(TG3PCI_PCISTATE);
9999 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10000 PCISTATE_ALLOW_APE_SHMEM_WR |
10001 PCISTATE_ALLOW_APE_PSPACE_WR;
10002 tw32(TG3PCI_PCISTATE, val);
10005 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10006 /* Enable some hw fixes. */
10007 val = tr32(TG3PCI_MSI_DATA);
10008 val |= (1 << 26) | (1 << 28) | (1 << 29);
10009 tw32(TG3PCI_MSI_DATA, val);
10012 /* Descriptor ring init may make accesses to the
10013 * NIC SRAM area to set up the TX descriptors, so we
10014 * can only do this after the hardware has been
10015 * successfully reset.
10016 */
10017 err = tg3_init_rings(tp);
10021 if (tg3_flag(tp, 57765_PLUS)) {
10022 val = tr32(TG3PCI_DMA_RW_CTRL) &
10023 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10024 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10025 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10026 if (!tg3_flag(tp, 57765_CLASS) &&
10027 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10028 tg3_asic_rev(tp) != ASIC_REV_5762)
10029 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10030 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10031 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10032 tg3_asic_rev(tp) != ASIC_REV_5761) {
10033 /* This value is determined during the probe time DMA
10034 * engine test, tg3_test_dma.
10035 */
10036 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10039 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10040 GRC_MODE_4X_NIC_SEND_RINGS |
10041 GRC_MODE_NO_TX_PHDR_CSUM |
10042 GRC_MODE_NO_RX_PHDR_CSUM);
10043 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10045 /* Pseudo-header checksum is done by hardware logic and not
10046 * the offload processors, so make the chip do the pseudo-
10047 * header checksums on receive. For transmit it is more
10048 * convenient to do the pseudo-header checksum in software
10049 * as Linux does that on transmit for us in all cases.
10050 */
10051 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10053 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10055 tw32(TG3_RX_PTP_CTL,
10056 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10058 if (tg3_flag(tp, PTP_CAPABLE))
10059 val |= GRC_MODE_TIME_SYNC_ENABLE;
10061 tw32(GRC_MODE, tp->grc_mode | val);
10063 /* On one AMD platform, MRRS is restricted to 4000 because of
10064 * a south bridge limitation. As a workaround, the driver sets
10065 * MRRS to 2048 instead of the default 4096.
10066 */
10067 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10068 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10069 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10070 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10073 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10074 val = tr32(GRC_MISC_CFG);
10075 val &= ~0xff;
10076 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10077 tw32(GRC_MISC_CFG, val);
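/* 66 MHz / (65 + 1) = 1 MHz, i.e. the prescaler value above makes one
 * timer tick equal one microsecond, which is the unit the host
 * coalescing *_usecs parameters are programmed in.
 */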
10079 /* Initialize MBUF/DESC pool. */
10080 if (tg3_flag(tp, 5750_PLUS)) {
10082 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10083 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10084 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10085 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10087 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10088 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10089 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10090 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10093 fw_len = tp->fw_len;
10094 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
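/* (fw_len + 0x7f) & ~0x7f rounds the firmware length up to the next
 * 128-byte boundary, e.g. 0x1234 becomes 0x1280, so the mbuf pool
 * carved out below it starts on an aligned address.
 */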
10095 tw32(BUFMGR_MB_POOL_ADDR,
10096 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10097 tw32(BUFMGR_MB_POOL_SIZE,
10098 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10101 if (tp->dev->mtu <= ETH_DATA_LEN) {
10102 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10103 tp->bufmgr_config.mbuf_read_dma_low_water);
10104 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10105 tp->bufmgr_config.mbuf_mac_rx_low_water);
10106 tw32(BUFMGR_MB_HIGH_WATER,
10107 tp->bufmgr_config.mbuf_high_water);
10109 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10110 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10111 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10112 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10113 tw32(BUFMGR_MB_HIGH_WATER,
10114 tp->bufmgr_config.mbuf_high_water_jumbo);
10116 tw32(BUFMGR_DMA_LOW_WATER,
10117 tp->bufmgr_config.dma_low_water);
10118 tw32(BUFMGR_DMA_HIGH_WATER,
10119 tp->bufmgr_config.dma_high_water);
10121 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10122 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10123 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10124 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10125 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10126 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10127 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10128 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10129 tw32(BUFMGR_MODE, val);
10130 for (i = 0; i < 2000; i++) {
10131 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10136 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10140 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10141 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10143 tg3_setup_rxbd_thresholds(tp);
10145 /* Initialize TG3_BDINFO's at:
10146 * RCVDBDI_STD_BD: standard eth size rx ring
10147 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10148 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10149 *
10150 * like so:
10151 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10152 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10153 * ring attribute flags
10154 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10156 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10157 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10159 * The size of each ring is fixed in the firmware, but the location is
10160 * configurable.
10161 */
10162 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10163 ((u64) tpr->rx_std_mapping >> 32));
10164 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10165 ((u64) tpr->rx_std_mapping & 0xffffffff));
10166 if (!tg3_flag(tp, 5717_PLUS))
10167 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10168 NIC_SRAM_RX_BUFFER_DESC);
10170 /* Disable the mini ring */
10171 if (!tg3_flag(tp, 5705_PLUS))
10172 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10173 BDINFO_FLAGS_DISABLED);
10175 /* Program the jumbo buffer descriptor ring control
10176 * blocks on those devices that have them.
10177 */
10178 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10179 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10181 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10182 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10183 ((u64) tpr->rx_jmb_mapping >> 32));
10184 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10185 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10186 val = TG3_RX_JMB_RING_SIZE(tp) <<
10187 BDINFO_FLAGS_MAXLEN_SHIFT;
10188 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10189 val | BDINFO_FLAGS_USE_EXT_RECV);
10190 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10191 tg3_flag(tp, 57765_CLASS) ||
10192 tg3_asic_rev(tp) == ASIC_REV_5762)
10193 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10194 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10196 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10197 BDINFO_FLAGS_DISABLED);
10200 if (tg3_flag(tp, 57765_PLUS)) {
10201 val = TG3_RX_STD_RING_SIZE(tp);
10202 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10203 val |= (TG3_RX_STD_DMA_SZ << 2);
10205 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10207 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10209 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10211 tpr->rx_std_prod_idx = tp->rx_pending;
10212 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10214 tpr->rx_jmb_prod_idx =
10215 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10216 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10218 tg3_rings_reset(tp);
10220 /* Initialize MAC address and backoff seed. */
10221 __tg3_set_mac_addr(tp, false);
10223 /* MTU + ethernet header + FCS + optional VLAN tag */
10224 tw32(MAC_RX_MTU_SIZE,
10225 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
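/* Example: with the default 1500-byte MTU this programs
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes
 * as the largest frame the MAC will pass.
 */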
10227 /* The slot time is changed by tg3_setup_phy if we
10228 * run at gigabit with half duplex.
10229 */
10230 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10231 (6 << TX_LENGTHS_IPG_SHIFT) |
10232 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10234 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10235 tg3_asic_rev(tp) == ASIC_REV_5762)
10236 val |= tr32(MAC_TX_LENGTHS) &
10237 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10238 TX_LENGTHS_CNT_DWN_VAL_MSK);
10240 tw32(MAC_TX_LENGTHS, val);
10242 /* Receive rules. */
10243 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10244 tw32(RCVLPC_CONFIG, 0x0181);
10246 /* Calculate RDMAC_MODE setting early, we need it to determine
10247 * the RCVLPC_STATE_ENABLE mask.
10248 */
10249 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10250 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10251 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10252 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10253 RDMAC_MODE_LNGREAD_ENAB);
10255 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10256 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10258 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10259 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10260 tg3_asic_rev(tp) == ASIC_REV_57780)
10261 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10262 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10263 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10265 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10266 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10267 if (tg3_flag(tp, TSO_CAPABLE)) {
10268 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10269 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10270 !tg3_flag(tp, IS_5788)) {
10271 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10275 if (tg3_flag(tp, PCI_EXPRESS))
10276 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10278 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10280 if (tp->dev->mtu <= ETH_DATA_LEN) {
10281 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10282 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10286 if (tg3_flag(tp, HW_TSO_1) ||
10287 tg3_flag(tp, HW_TSO_2) ||
10288 tg3_flag(tp, HW_TSO_3))
10289 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10291 if (tg3_flag(tp, 57765_PLUS) ||
10292 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10293 tg3_asic_rev(tp) == ASIC_REV_57780)
10294 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10296 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10297 tg3_asic_rev(tp) == ASIC_REV_5762)
10298 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10300 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10301 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10302 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10303 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10304 tg3_flag(tp, 57765_PLUS)) {
10307 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10308 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10310 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10312 val = tr32(tgtreg);
10313 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10314 tg3_asic_rev(tp) == ASIC_REV_5762) {
10315 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10316 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10317 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10318 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10319 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10320 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10322 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10325 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10326 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10327 tg3_asic_rev(tp) == ASIC_REV_5762) {
10330 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10331 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10333 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10335 val = tr32(tgtreg);
10337 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10338 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10341 /* Receive/send statistics. */
10342 if (tg3_flag(tp, 5750_PLUS)) {
10343 val = tr32(RCVLPC_STATS_ENABLE);
10344 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10345 tw32(RCVLPC_STATS_ENABLE, val);
10346 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10347 tg3_flag(tp, TSO_CAPABLE)) {
10348 val = tr32(RCVLPC_STATS_ENABLE);
10349 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10350 tw32(RCVLPC_STATS_ENABLE, val);
10352 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10354 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10355 tw32(SNDDATAI_STATSENAB, 0xffffff);
10356 tw32(SNDDATAI_STATSCTRL,
10357 (SNDDATAI_SCTRL_ENABLE |
10358 SNDDATAI_SCTRL_FASTUPD));
10360 /* Setup host coalescing engine. */
10361 tw32(HOSTCC_MODE, 0);
10362 for (i = 0; i < 2000; i++) {
10363 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10368 __tg3_set_coalesce(tp, &tp->coal);
10370 if (!tg3_flag(tp, 5705_PLUS)) {
10371 /* Status/statistics block address. See tg3_timer,
10372 * the tg3_periodic_fetch_stats call there, and
10373 * tg3_get_stats to see how this works for 5705/5750 chips.
10374 */
10375 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10376 ((u64) tp->stats_mapping >> 32));
10377 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10378 ((u64) tp->stats_mapping & 0xffffffff));
10379 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10381 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10383 /* Clear statistics and status block memory areas */
10384 for (i = NIC_SRAM_STATS_BLK;
10385 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10386 i += sizeof(u32)) {
10387 tg3_write_mem(tp, i, 0);
10392 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10394 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10395 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10396 if (!tg3_flag(tp, 5705_PLUS))
10397 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10399 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10400 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10401 /* reset to prevent losing 1st rx packet intermittently */
10402 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10406 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10407 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10408 MAC_MODE_FHDE_ENABLE;
10409 if (tg3_flag(tp, ENABLE_APE))
10410 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10411 if (!tg3_flag(tp, 5705_PLUS) &&
10412 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10413 tg3_asic_rev(tp) != ASIC_REV_5700)
10414 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10415 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10418 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10419 * If TG3_FLAG_IS_NIC is zero, we should read the
10420 * register to preserve the GPIO settings for LOMs. The GPIOs,
10421 * whether used as inputs or outputs, are set by boot code after
10422 * reset.
10423 */
10424 if (!tg3_flag(tp, IS_NIC)) {
10427 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10428 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10429 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10431 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10432 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10433 GRC_LCLCTRL_GPIO_OUTPUT3;
10435 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10436 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10438 tp->grc_local_ctrl &= ~gpio_mask;
10439 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10441 /* GPIO1 must be driven high for eeprom write protect */
10442 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10443 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10444 GRC_LCLCTRL_GPIO_OUTPUT1);
10446 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10449 if (tg3_flag(tp, USING_MSIX)) {
10450 val = tr32(MSGINT_MODE);
10451 val |= MSGINT_MODE_ENABLE;
10452 if (tp->irq_cnt > 1)
10453 val |= MSGINT_MODE_MULTIVEC_EN;
10454 if (!tg3_flag(tp, 1SHOT_MSI))
10455 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10456 tw32(MSGINT_MODE, val);
10459 if (!tg3_flag(tp, 5705_PLUS)) {
10460 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10464 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10465 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10466 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10467 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10468 WDMAC_MODE_LNGREAD_ENAB);
10470 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10471 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10472 if (tg3_flag(tp, TSO_CAPABLE) &&
10473 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10474 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10476 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10477 !tg3_flag(tp, IS_5788)) {
10478 val |= WDMAC_MODE_RX_ACCEL;
10482 /* Enable host coalescing bug fix */
10483 if (tg3_flag(tp, 5755_PLUS))
10484 val |= WDMAC_MODE_STATUS_TAG_FIX;
10486 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10487 val |= WDMAC_MODE_BURST_ALL_DATA;
10489 tw32_f(WDMAC_MODE, val);
10492 if (tg3_flag(tp, PCIX_MODE)) {
10495 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10497 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10498 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10499 pcix_cmd |= PCI_X_CMD_READ_2K;
10500 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10501 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10502 pcix_cmd |= PCI_X_CMD_READ_2K;
10504 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10508 tw32_f(RDMAC_MODE, rdmac_mode);
10511 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10512 tg3_asic_rev(tp) == ASIC_REV_5720) {
10513 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10514 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10517 if (i < TG3_NUM_RDMA_CHANNELS) {
10518 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10519 val |= tg3_lso_rd_dma_workaround_bit(tp);
10520 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10521 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10525 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10526 if (!tg3_flag(tp, 5705_PLUS))
10527 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10529 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10530 tw32(SNDDATAC_MODE,
10531 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10533 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10535 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10536 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10537 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10538 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10539 val |= RCVDBDI_MODE_LRG_RING_SZ;
10540 tw32(RCVDBDI_MODE, val);
10541 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10542 if (tg3_flag(tp, HW_TSO_1) ||
10543 tg3_flag(tp, HW_TSO_2) ||
10544 tg3_flag(tp, HW_TSO_3))
10545 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10546 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10547 if (tg3_flag(tp, ENABLE_TSS))
10548 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10549 tw32(SNDBDI_MODE, val);
10550 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10552 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10553 err = tg3_load_5701_a0_firmware_fix(tp);
10558 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10559 /* Ignore any errors for the firmware download. If download
10560 * fails, the device will operate with EEE disabled.
10561 */
10562 tg3_load_57766_firmware(tp);
10565 if (tg3_flag(tp, TSO_CAPABLE)) {
10566 err = tg3_load_tso_firmware(tp);
10571 tp->tx_mode = TX_MODE_ENABLE;
10573 if (tg3_flag(tp, 5755_PLUS) ||
10574 tg3_asic_rev(tp) == ASIC_REV_5906)
10575 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10577 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10578 tg3_asic_rev(tp) == ASIC_REV_5762) {
10579 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10580 tp->tx_mode &= ~val;
10581 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10584 tw32_f(MAC_TX_MODE, tp->tx_mode);
10587 if (tg3_flag(tp, ENABLE_RSS)) {
10590 tg3_rss_write_indir_tbl(tp);
10592 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10594 for (i = 0; i < 10 ; i++)
10595 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10598 tp->rx_mode = RX_MODE_ENABLE;
10599 if (tg3_flag(tp, 5755_PLUS))
10600 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10602 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10603 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10605 if (tg3_flag(tp, ENABLE_RSS))
10606 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10607 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10608 RX_MODE_RSS_IPV6_HASH_EN |
10609 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10610 RX_MODE_RSS_IPV4_HASH_EN |
10611 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10613 tw32_f(MAC_RX_MODE, tp->rx_mode);
10616 tw32(MAC_LED_CTRL, tp->led_ctrl);
10618 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10619 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10620 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10623 tw32_f(MAC_RX_MODE, tp->rx_mode);
10626 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10627 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10628 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10629 /* Set drive transmission level to 1.2V */
10630 /* only if the signal pre-emphasis bit is not set */
10631 val = tr32(MAC_SERDES_CFG);
10634 tw32(MAC_SERDES_CFG, val);
10636 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10637 tw32(MAC_SERDES_CFG, 0x616000);
10640 /* Prevent chip from dropping frames when flow control
10643 if (tg3_flag(tp, 57765_CLASS))
10647 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10649 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10650 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10651 /* Use hardware link auto-negotiation */
10652 tg3_flag_set(tp, HW_AUTONEG);
10655 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10656 tg3_asic_rev(tp) == ASIC_REV_5714) {
10659 tmp = tr32(SERDES_RX_CTRL);
10660 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10661 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10662 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10663 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10666 if (!tg3_flag(tp, USE_PHYLIB)) {
10667 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10668 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10670 err = tg3_setup_phy(tp, false);
10674 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10675 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10678 /* Clear CRC stats. */
10679 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10680 tg3_writephy(tp, MII_TG3_TEST1,
10681 tmp | MII_TG3_TEST1_CRC_EN);
10682 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10687 __tg3_set_rx_mode(tp->dev);
10689 /* Initialize receive rules. */
10690 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10691 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10692 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10693 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10695 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10699 if (tg3_flag(tp, ENABLE_ASF))
10703 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10706 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10709 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10712 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10715 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10718 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10721 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10724 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10727 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10730 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10733 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10736 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10739 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10741 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10749 if (tg3_flag(tp, ENABLE_APE))
10750 /* Write our heartbeat update interval to APE. */
10751 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10752 APE_HOST_HEARTBEAT_INT_5SEC);
10754 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10759 /* Called at device open time to get the chip ready for
10760 * packet processing. Invoked with tp->lock held.
10761 */
10762 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10764 /* Chip may have been just powered on. If so, the boot code may still
10765 * be running initialization. Wait for it to finish to avoid races in
10766 * accessing the hardware.
10768 tg3_enable_register_access(tp);
10771 tg3_switch_clocks(tp);
10773 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10775 return tg3_reset_hw(tp, reset_phy);
10778 #ifdef CONFIG_TIGON3_HWMON
10779 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10781 u32 off, len = TG3_OCIR_LEN;
10784 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10785 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10787 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10788 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10789 memset(ocir, 0, len);
10793 /* sysfs attributes for hwmon */
10794 static ssize_t tg3_show_temp(struct device *dev,
10795 struct device_attribute *devattr, char *buf)
10797 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10798 struct tg3 *tp = dev_get_drvdata(dev);
10799 u32 temperature;
10801 spin_lock_bh(&tp->lock);
10802 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10803 sizeof(temperature));
10804 spin_unlock_bh(&tp->lock);
10805 return sprintf(buf, "%u\n", temperature * 1000);
10806 }
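/* The APE scratchpad reports degrees Celsius; the hwmon sysfs ABI
 * expects millidegrees, hence the multiplication by 1000 above.
 */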
10809 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10810 TG3_TEMP_SENSOR_OFFSET);
10811 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10812 TG3_TEMP_CAUTION_OFFSET);
10813 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10814 TG3_TEMP_MAX_OFFSET);
10816 static struct attribute *tg3_attrs[] = {
10817 &sensor_dev_attr_temp1_input.dev_attr.attr,
10818 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10819 &sensor_dev_attr_temp1_max.dev_attr.attr,
10822 ATTRIBUTE_GROUPS(tg3);
10824 static void tg3_hwmon_close(struct tg3 *tp)
10826 if (tp->hwmon_dev) {
10827 hwmon_device_unregister(tp->hwmon_dev);
10828 tp->hwmon_dev = NULL;
10832 static void tg3_hwmon_open(struct tg3 *tp)
10836 struct pci_dev *pdev = tp->pdev;
10837 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10839 tg3_sd_scan_scratchpad(tp, ocirs);
10841 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10842 if (!ocirs[i].src_data_length)
10845 size += ocirs[i].src_hdr_length;
10846 size += ocirs[i].src_data_length;
10852 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10854 if (IS_ERR(tp->hwmon_dev)) {
10855 tp->hwmon_dev = NULL;
10856 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10860 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10861 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10862 #endif /* CONFIG_TIGON3_HWMON */
10865 #define TG3_STAT_ADD32(PSTAT, REG) \
10866 do { u32 __val = tr32(REG); \
10867 (PSTAT)->low += __val; \
10868 if ((PSTAT)->low < __val) \
10869 (PSTAT)->high += 1; \
10870 } while (0)
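/* The low/high pair emulates a 64-bit counter fed by 32-bit hardware
 * registers: after the addition, a wrapped low word is detectable as
 * (low < __val), and the carry is propagated into high. Illustrative
 * check (not driver code):
 */
#if 0
struct { u32 low, high; } ctr = { .low = 0xfffffff0, .high = 0 };
u32 sample = 0x20;

ctr.low += sample;	/* wraps to 0x10 */
if (ctr.low < sample)	/* 0x10 < 0x20, so a carry occurred */
	ctr.high += 1;	/* ctr now holds 0x1_00000010 */
#endif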
10872 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10873 {
10874 struct tg3_hw_stats *sp = tp->hw_stats;
10876 if (!tp->link_up)
10877 return;
10879 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10880 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10881 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10882 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10883 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10884 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10885 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10886 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10887 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10888 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10889 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10890 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10891 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10892 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10893 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10894 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10897 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10898 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10899 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10900 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10903 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10904 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10905 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10906 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10907 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10908 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10909 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10910 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10911 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10912 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10913 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10914 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10915 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10916 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10918 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10919 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10920 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10921 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10922 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10923 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10925 u32 val = tr32(HOSTCC_FLOW_ATTN);
10926 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10928 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10929 sp->rx_discards.low += val;
10930 if (sp->rx_discards.low < val)
10931 sp->rx_discards.high += 1;
10933 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10935 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10938 static void tg3_chk_missed_msi(struct tg3 *tp)
10942 for (i = 0; i < tp->irq_cnt; i++) {
10943 struct tg3_napi *tnapi = &tp->napi[i];
10945 if (tg3_has_work(tnapi)) {
10946 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10947 tnapi->last_tx_cons == tnapi->tx_cons) {
10948 if (tnapi->chk_msi_cnt < 1) {
10949 tnapi->chk_msi_cnt++;
10950 return;
10951 }
10952 tg3_msi(0, tnapi);
10953 }
10954 }
10955 tnapi->chk_msi_cnt = 0;
10956 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10957 tnapi->last_tx_cons = tnapi->tx_cons;
10961 static void tg3_timer(struct timer_list *t)
10963 struct tg3 *tp = from_timer(tp, t, timer);
10965 spin_lock(&tp->lock);
10967 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10968 spin_unlock(&tp->lock);
10969 goto restart_timer;
10972 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10973 tg3_flag(tp, 57765_CLASS))
10974 tg3_chk_missed_msi(tp);
10976 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10977 /* BCM4785: Flush posted writes from GbE to host memory. */
10981 if (!tg3_flag(tp, TAGGED_STATUS)) {
10982 /* All of this garbage is needed because, when using non-tagged
10983 * IRQ status, the mailbox/status_block protocol the chip
10984 * uses with the cpu is race prone.
10985 */
10986 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10987 tw32(GRC_LOCAL_CTRL,
10988 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10990 tw32(HOSTCC_MODE, tp->coalesce_mode |
10991 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10994 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10995 spin_unlock(&tp->lock);
10996 tg3_reset_task_schedule(tp);
10997 goto restart_timer;
11001 /* This part only runs once per second. */
11002 if (!--tp->timer_counter) {
11003 if (tg3_flag(tp, 5705_PLUS))
11004 tg3_periodic_fetch_stats(tp);
11006 if (tp->setlpicnt && !--tp->setlpicnt)
11007 tg3_phy_eee_enable(tp);
11009 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11013 mac_stat = tr32(MAC_STATUS);
11016 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11017 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11019 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11023 tg3_setup_phy(tp, false);
11024 } else if (tg3_flag(tp, POLL_SERDES)) {
11025 u32 mac_stat = tr32(MAC_STATUS);
11026 int need_setup = 0;
11029 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11032 if (!tp->link_up &&
11033 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11034 MAC_STATUS_SIGNAL_DET))) {
11038 if (!tp->serdes_counter) {
11041 ~MAC_MODE_PORT_MODE_MASK));
11043 tw32_f(MAC_MODE, tp->mac_mode);
11046 tg3_setup_phy(tp, false);
11048 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11049 tg3_flag(tp, 5780_CLASS)) {
11050 tg3_serdes_parallel_detect(tp);
11051 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11052 u32 cpmu = tr32(TG3_CPMU_STATUS);
11053 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11054 TG3_CPMU_STATUS_LINK_MASK);
11056 if (link_up != tp->link_up)
11057 tg3_setup_phy(tp, false);
11060 tp->timer_counter = tp->timer_multiplier;
11063 /* Heartbeat is only sent once every 2 seconds.
11065 * The heartbeat is to tell the ASF firmware that the host
11066 * driver is still alive. In the event that the OS crashes,
11067 * ASF needs to reset the hardware to free up the FIFO space
11068 * that may be filled with rx packets destined for the host.
11069 * If the FIFO is full, ASF will no longer function properly.
11071 * Unintended resets have been reported on real time kernels
11072 * where the timer doesn't run on time. Netpoll will also have
11073 * the same problem.
11074 *
11075 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11076 * to check the ring condition when the heartbeat is expiring
11077 * before doing the reset. This will prevent most unintended
11078 * resets.
11079 */
11080 if (!--tp->asf_counter) {
11081 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11082 tg3_wait_for_event_ack(tp);
11084 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11085 FWCMD_NICDRV_ALIVE3);
11086 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11087 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11088 TG3_FW_UPDATE_TIMEOUT_SEC);
11090 tg3_generate_fw_event(tp);
11092 tp->asf_counter = tp->asf_multiplier;
11095 /* Update the APE heartbeat every 5 seconds. */
11096 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11098 spin_unlock(&tp->lock);
11101 tp->timer.expires = jiffies + tp->timer_offset;
11102 add_timer(&tp->timer);
11105 static void tg3_timer_init(struct tg3 *tp)
11107 if (tg3_flag(tp, TAGGED_STATUS) &&
11108 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11109 !tg3_flag(tp, 57765_CLASS))
11110 tp->timer_offset = HZ;
11112 tp->timer_offset = HZ / 10;
11114 BUG_ON(tp->timer_offset > HZ);
11116 tp->timer_multiplier = (HZ / tp->timer_offset);
11117 tp->asf_multiplier = (HZ / tp->timer_offset) *
11118 TG3_FW_UPDATE_FREQ_SEC;
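/* Example: with timer_offset = HZ / 10 the timer fires ten times per
 * second, timer_multiplier is 10 (so the once-per-second work in
 * tg3_timer() runs every tenth invocation), and asf_multiplier is
 * 10 * TG3_FW_UPDATE_FREQ_SEC ticks between ASF heartbeats.
 */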
11120 timer_setup(&tp->timer, tg3_timer, 0);
11123 static void tg3_timer_start(struct tg3 *tp)
11125 tp->asf_counter = tp->asf_multiplier;
11126 tp->timer_counter = tp->timer_multiplier;
11128 tp->timer.expires = jiffies + tp->timer_offset;
11129 add_timer(&tp->timer);
11132 static void tg3_timer_stop(struct tg3 *tp)
11134 del_timer_sync(&tp->timer);
11137 /* Restart hardware after configuration changes, self-test, etc.
11138 * Invoked with tp->lock held.
11140 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11141 __releases(tp->lock)
11142 __acquires(tp->lock)
11146 err = tg3_init_hw(tp, reset_phy);
11148 netdev_err(tp->dev,
11149 "Failed to re-initialize device, aborting\n");
11150 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11151 tg3_full_unlock(tp);
11152 tg3_timer_stop(tp);
11154 tg3_napi_enable(tp);
11155 dev_close(tp->dev);
11156 tg3_full_lock(tp, 0);
11161 static void tg3_reset_task(struct work_struct *work)
11163 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11167 tg3_full_lock(tp, 0);
11169 if (tp->pcierr_recovery || !netif_running(tp->dev)) {
11170 tg3_flag_clear(tp, RESET_TASK_PENDING);
11171 tg3_full_unlock(tp);
11176 tg3_full_unlock(tp);
11180 tg3_netif_stop(tp);
11182 tg3_full_lock(tp, 1);
11184 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11185 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11186 tp->write32_rx_mbox = tg3_write_flush_reg32;
11187 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11188 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11191 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11192 err = tg3_init_hw(tp, true);
11194 tg3_full_unlock(tp);
11196 tg3_napi_enable(tp);
11197 /* Clear this flag so that tg3_reset_task_cancel() will not
11198 * call cancel_work_sync() and wait forever.
11200 tg3_flag_clear(tp, RESET_TASK_PENDING);
11201 dev_close(tp->dev);
11205 tg3_netif_start(tp);
11206 tg3_full_unlock(tp);
11208 tg3_flag_clear(tp, RESET_TASK_PENDING);
11213 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11216 unsigned long flags;
11218 struct tg3_napi *tnapi = &tp->napi[irq_num];
11220 if (tp->irq_cnt == 1)
11221 name = tp->dev->name;
11223 name = &tnapi->irq_lbl[0];
11224 if (tnapi->tx_buffers && tnapi->rx_rcb)
11225 snprintf(name, IFNAMSIZ,
11226 "%s-txrx-%d", tp->dev->name, irq_num);
11227 else if (tnapi->tx_buffers)
11228 snprintf(name, IFNAMSIZ,
11229 "%s-tx-%d", tp->dev->name, irq_num);
11230 else if (tnapi->rx_rcb)
11231 snprintf(name, IFNAMSIZ,
11232 "%s-rx-%d", tp->dev->name, irq_num);
11234 snprintf(name, IFNAMSIZ,
11235 "%s-%d", tp->dev->name, irq_num);
11236 name[IFNAMSIZ-1] = 0;
11237 }
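/* The resulting vector names are what shows up in /proc/interrupts,
 * e.g. "eth0" for a single vector, or "eth0-txrx-1", "eth0-rx-2", ...
 * in multiqueue MSI-X mode.
 */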
11239 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11241 if (tg3_flag(tp, 1SHOT_MSI))
11242 fn = tg3_msi_1shot;
11245 fn = tg3_interrupt;
11246 if (tg3_flag(tp, TAGGED_STATUS))
11247 fn = tg3_interrupt_tagged;
11248 flags = IRQF_SHARED;
11251 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11254 static int tg3_test_interrupt(struct tg3 *tp)
11256 struct tg3_napi *tnapi = &tp->napi[0];
11257 struct net_device *dev = tp->dev;
11258 int err, i, intr_ok = 0;
11261 if (!netif_running(dev))
11264 tg3_disable_ints(tp);
11266 free_irq(tnapi->irq_vec, tnapi);
11269 * Turn off MSI one shot mode. Otherwise this test has no
11270 * observable way to know whether the interrupt was delivered.
11271 */
11272 if (tg3_flag(tp, 57765_PLUS)) {
11273 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11274 tw32(MSGINT_MODE, val);
11277 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11278 IRQF_SHARED, dev->name, tnapi);
11282 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11283 tg3_enable_ints(tp);
11285 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11288 for (i = 0; i < 5; i++) {
11289 u32 int_mbox, misc_host_ctrl;
11291 int_mbox = tr32_mailbox(tnapi->int_mbox);
11292 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11294 if ((int_mbox != 0) ||
11295 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11300 if (tg3_flag(tp, 57765_PLUS) &&
11301 tnapi->hw_status->status_tag != tnapi->last_tag)
11302 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11307 tg3_disable_ints(tp);
11309 free_irq(tnapi->irq_vec, tnapi);
11311 err = tg3_request_irq(tp, 0);
11317 /* Reenable MSI one shot mode. */
11318 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11319 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11320 tw32(MSGINT_MODE, val);
11328 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11329 * successfully restored.
11330 */
11331 static int tg3_test_msi(struct tg3 *tp)
11336 if (!tg3_flag(tp, USING_MSI))
11339 /* Turn off SERR reporting in case MSI terminates with Master
11340 * Abort.
11341 */
11342 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11343 pci_write_config_word(tp->pdev, PCI_COMMAND,
11344 pci_cmd & ~PCI_COMMAND_SERR);
11346 err = tg3_test_interrupt(tp);
11348 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11353 /* other failures */
11357 /* MSI test failed, go back to INTx mode */
11358 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11359 "to INTx mode. Please report this failure to the PCI "
11360 "maintainer and include system chipset information\n");
11362 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11364 pci_disable_msi(tp->pdev);
11366 tg3_flag_clear(tp, USING_MSI);
11367 tp->napi[0].irq_vec = tp->pdev->irq;
11369 err = tg3_request_irq(tp, 0);
11373 /* Need to reset the chip because the MSI cycle may have terminated
11374 * with Master Abort.
11375 */
11376 tg3_full_lock(tp, 1);
11378 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11379 err = tg3_init_hw(tp, true);
11381 tg3_full_unlock(tp);
11384 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11389 static int tg3_request_firmware(struct tg3 *tp)
11391 const struct tg3_firmware_hdr *fw_hdr;
11393 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11394 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11399 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11401 /* Firmware blob starts with version numbers, followed by
11402 * start address and _full_ length including BSS sections
11403 * (which must be longer than the actual data, of course).
11404 */
11406 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11407 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11408 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11409 tp->fw_len, tp->fw_needed);
11410 release_firmware(tp->fw);
11415 /* We no longer need firmware; we have it. */
11416 tp->fw_needed = NULL;
11420 static u32 tg3_irq_count(struct tg3 *tp)
11422 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11424 if (irq_cnt > 1) {
11425 /* We want as many rx rings enabled as there are cpus.
11426 * In multiqueue MSI-X mode, the first MSI-X vector
11427 * only deals with link interrupts, etc, so we add
11428 * one to the number of vectors we are requesting.
11429 */
11430 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11431 }
11433 return irq_cnt;
11434 }
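/* Example: with rxq_cnt = 4 and txq_cnt = 1, irq_cnt starts at 4 and
 * becomes min(4 + 1, irq_max): four rx vectors plus vector 0, which
 * handles link and other events in multiqueue MSI-X mode.
 */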
11436 static bool tg3_enable_msix(struct tg3 *tp)
11439 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11441 tp->txq_cnt = tp->txq_req;
11442 tp->rxq_cnt = tp->rxq_req;
11444 tp->rxq_cnt = netif_get_num_default_rss_queues();
11445 if (tp->rxq_cnt > tp->rxq_max)
11446 tp->rxq_cnt = tp->rxq_max;
11448 /* Disable multiple TX rings by default. Simple round-robin hardware
11449 * scheduling of the TX rings can cause starvation of rings with
11450 * small packets when other rings have TSO or jumbo packets.
11451 */
11455 tp->irq_cnt = tg3_irq_count(tp);
11457 for (i = 0; i < tp->irq_max; i++) {
11458 msix_ent[i].entry = i;
11459 msix_ent[i].vector = 0;
11462 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11465 } else if (rc < tp->irq_cnt) {
11466 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11469 tp->rxq_cnt = max(rc - 1, 1);
11471 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11474 for (i = 0; i < tp->irq_max; i++)
11475 tp->napi[i].irq_vec = msix_ent[i].vector;
11477 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11478 pci_disable_msix(tp->pdev);
11482 if (tp->irq_cnt == 1)
11485 tg3_flag_set(tp, ENABLE_RSS);
11487 if (tp->txq_cnt > 1)
11488 tg3_flag_set(tp, ENABLE_TSS);
11490 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11495 static void tg3_ints_init(struct tg3 *tp)
11497 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11498 !tg3_flag(tp, TAGGED_STATUS)) {
11499 /* All MSI supporting chips should support tagged
11500 * status. Assert that this is the case.
11501 */
11502 netdev_warn(tp->dev,
11503 "MSI without TAGGED_STATUS? Not using MSI\n");
11507 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11508 tg3_flag_set(tp, USING_MSIX);
11509 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11510 tg3_flag_set(tp, USING_MSI);
11512 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11513 u32 msi_mode = tr32(MSGINT_MODE);
11514 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11515 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11516 if (!tg3_flag(tp, 1SHOT_MSI))
11517 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11518 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11521 if (!tg3_flag(tp, USING_MSIX)) {
11523 tp->napi[0].irq_vec = tp->pdev->irq;
11526 if (tp->irq_cnt == 1) {
11529 netif_set_real_num_tx_queues(tp->dev, 1);
11530 netif_set_real_num_rx_queues(tp->dev, 1);
11534 static void tg3_ints_fini(struct tg3 *tp)
11536 if (tg3_flag(tp, USING_MSIX))
11537 pci_disable_msix(tp->pdev);
11538 else if (tg3_flag(tp, USING_MSI))
11539 pci_disable_msi(tp->pdev);
11540 tg3_flag_clear(tp, USING_MSI);
11541 tg3_flag_clear(tp, USING_MSIX);
11542 tg3_flag_clear(tp, ENABLE_RSS);
11543 tg3_flag_clear(tp, ENABLE_TSS);
11546 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11549 struct net_device *dev = tp->dev;
11550 int i, err;
11552 /*
11553 * Set up interrupts first so we know how
11554 * many NAPI resources to allocate
11555 */
11556 tg3_ints_init(tp);
11558 tg3_rss_check_indir_tbl(tp);
11560 /* The placement of this call is tied
11561 * to the setup and use of Host TX descriptors.
11562 */
11563 err = tg3_alloc_consistent(tp);
11565 goto out_ints_fini;
11569 tg3_napi_enable(tp);
11571 for (i = 0; i < tp->irq_cnt; i++) {
11572 err = tg3_request_irq(tp, i);
11574 for (i--; i >= 0; i--) {
11575 struct tg3_napi *tnapi = &tp->napi[i];
11577 free_irq(tnapi->irq_vec, tnapi);
11579 goto out_napi_fini;
11583 tg3_full_lock(tp, 0);
11586 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11588 err = tg3_init_hw(tp, reset_phy);
11590 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11591 tg3_free_rings(tp);
11594 tg3_full_unlock(tp);
11599 if (test_irq && tg3_flag(tp, USING_MSI)) {
11600 err = tg3_test_msi(tp);
11603 tg3_full_lock(tp, 0);
11604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11605 tg3_free_rings(tp);
11606 tg3_full_unlock(tp);
11608 goto out_napi_fini;
11611 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11612 u32 val = tr32(PCIE_TRANSACTION_CFG);
11614 tw32(PCIE_TRANSACTION_CFG,
11615 val | PCIE_TRANS_CFG_1SHOT_MSI);
11621 tg3_hwmon_open(tp);
11623 tg3_full_lock(tp, 0);
11625 tg3_timer_start(tp);
11626 tg3_flag_set(tp, INIT_COMPLETE);
11627 tg3_enable_ints(tp);
11629 tg3_ptp_resume(tp);
11631 tg3_full_unlock(tp);
11633 netif_tx_start_all_queues(dev);
11635 /*
11636 * Reset the loopback feature if it was turned on while the device was
11637 * down; make sure that it's installed properly now.
11638 */
11639 if (dev->features & NETIF_F_LOOPBACK)
11640 tg3_set_loopback(dev, dev->features);
11645 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11646 struct tg3_napi *tnapi = &tp->napi[i];
11647 free_irq(tnapi->irq_vec, tnapi);
11651 tg3_napi_disable(tp);
11653 tg3_free_consistent(tp);
11661 static void tg3_stop(struct tg3 *tp)
11665 tg3_reset_task_cancel(tp);
11666 tg3_netif_stop(tp);
11668 tg3_timer_stop(tp);
11670 tg3_hwmon_close(tp);
11674 tg3_full_lock(tp, 1);
11676 tg3_disable_ints(tp);
11678 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11679 tg3_free_rings(tp);
11680 tg3_flag_clear(tp, INIT_COMPLETE);
11682 tg3_full_unlock(tp);
11684 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11685 struct tg3_napi *tnapi = &tp->napi[i];
11686 free_irq(tnapi->irq_vec, tnapi);
11693 tg3_free_consistent(tp);
11696 static int tg3_open(struct net_device *dev)
11698 struct tg3 *tp = netdev_priv(dev);
11701 if (tp->pcierr_recovery) {
11702 netdev_err(dev, "Failed to open device. PCI error recovery "
11707 if (tp->fw_needed) {
11708 err = tg3_request_firmware(tp);
11709 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11711 netdev_warn(tp->dev, "EEE capability disabled\n");
11712 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11713 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11714 netdev_warn(tp->dev, "EEE capability restored\n");
11715 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11717 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11721 netdev_warn(tp->dev, "TSO capability disabled\n");
11722 tg3_flag_clear(tp, TSO_CAPABLE);
11723 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11724 netdev_notice(tp->dev, "TSO capability restored\n");
11725 tg3_flag_set(tp, TSO_CAPABLE);
11729 tg3_carrier_off(tp);
11731 err = tg3_power_up(tp);
11735 tg3_full_lock(tp, 0);
11737 tg3_disable_ints(tp);
11738 tg3_flag_clear(tp, INIT_COMPLETE);
11740 tg3_full_unlock(tp);
11742 err = tg3_start(tp,
11743 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11746 tg3_frob_aux_power(tp, false);
11747 pci_set_power_state(tp->pdev, PCI_D3hot);
11753 static int tg3_close(struct net_device *dev)
11755 struct tg3 *tp = netdev_priv(dev);
11757 if (tp->pcierr_recovery) {
11758 netdev_err(dev, "Failed to close device. PCI error recovery "
11765 if (pci_device_is_present(tp->pdev)) {
11766 tg3_power_down_prepare(tp);
11768 tg3_carrier_off(tp);
11773 static inline u64 get_stat64(tg3_stat64_t *val)
11775 return ((u64)val->high << 32) | ((u64)val->low);
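/* Illustrative arithmetic for the 64-bit reassembly above: with
 * val->high = 0x00000001 and val->low = 0x00000002, the result is
 * ((u64)0x1 << 32) | 0x2 == 0x0000000100000002.
 */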
11778 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11780 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11782 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11783 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11784 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11787 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11788 tg3_writephy(tp, MII_TG3_TEST1,
11789 val | MII_TG3_TEST1_CRC_EN);
11790 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11794 tp->phy_crc_errors += val;
11796 return tp->phy_crc_errors;
11799 return get_stat64(&hw_stats->rx_fcs_errors);
11802 #define ESTAT_ADD(member) \
11803 estats->member = old_estats->member + \
11804 get_stat64(&hw_stats->member)
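/* For example, ESTAT_ADD(rx_octets) expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each counter is the snapshot saved across the last chip reset
 * plus the live hardware value, so totals keep growing monotonically.
 */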
11806 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11808 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11809 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11811 ESTAT_ADD(rx_octets);
11812 ESTAT_ADD(rx_fragments);
11813 ESTAT_ADD(rx_ucast_packets);
11814 ESTAT_ADD(rx_mcast_packets);
11815 ESTAT_ADD(rx_bcast_packets);
11816 ESTAT_ADD(rx_fcs_errors);
11817 ESTAT_ADD(rx_align_errors);
11818 ESTAT_ADD(rx_xon_pause_rcvd);
11819 ESTAT_ADD(rx_xoff_pause_rcvd);
11820 ESTAT_ADD(rx_mac_ctrl_rcvd);
11821 ESTAT_ADD(rx_xoff_entered);
11822 ESTAT_ADD(rx_frame_too_long_errors);
11823 ESTAT_ADD(rx_jabbers);
11824 ESTAT_ADD(rx_undersize_packets);
11825 ESTAT_ADD(rx_in_length_errors);
11826 ESTAT_ADD(rx_out_length_errors);
11827 ESTAT_ADD(rx_64_or_less_octet_packets);
11828 ESTAT_ADD(rx_65_to_127_octet_packets);
11829 ESTAT_ADD(rx_128_to_255_octet_packets);
11830 ESTAT_ADD(rx_256_to_511_octet_packets);
11831 ESTAT_ADD(rx_512_to_1023_octet_packets);
11832 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11833 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11834 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11835 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11836 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11838 ESTAT_ADD(tx_octets);
11839 ESTAT_ADD(tx_collisions);
11840 ESTAT_ADD(tx_xon_sent);
11841 ESTAT_ADD(tx_xoff_sent);
11842 ESTAT_ADD(tx_flow_control);
11843 ESTAT_ADD(tx_mac_errors);
11844 ESTAT_ADD(tx_single_collisions);
11845 ESTAT_ADD(tx_mult_collisions);
11846 ESTAT_ADD(tx_deferred);
11847 ESTAT_ADD(tx_excessive_collisions);
11848 ESTAT_ADD(tx_late_collisions);
11849 ESTAT_ADD(tx_collide_2times);
11850 ESTAT_ADD(tx_collide_3times);
11851 ESTAT_ADD(tx_collide_4times);
11852 ESTAT_ADD(tx_collide_5times);
11853 ESTAT_ADD(tx_collide_6times);
11854 ESTAT_ADD(tx_collide_7times);
11855 ESTAT_ADD(tx_collide_8times);
11856 ESTAT_ADD(tx_collide_9times);
11857 ESTAT_ADD(tx_collide_10times);
11858 ESTAT_ADD(tx_collide_11times);
11859 ESTAT_ADD(tx_collide_12times);
11860 ESTAT_ADD(tx_collide_13times);
11861 ESTAT_ADD(tx_collide_14times);
11862 ESTAT_ADD(tx_collide_15times);
11863 ESTAT_ADD(tx_ucast_packets);
11864 ESTAT_ADD(tx_mcast_packets);
11865 ESTAT_ADD(tx_bcast_packets);
11866 ESTAT_ADD(tx_carrier_sense_errors);
11867 ESTAT_ADD(tx_discards);
11868 ESTAT_ADD(tx_errors);
11870 ESTAT_ADD(dma_writeq_full);
11871 ESTAT_ADD(dma_write_prioq_full);
11872 ESTAT_ADD(rxbds_empty);
11873 ESTAT_ADD(rx_discards);
11874 ESTAT_ADD(rx_errors);
11875 ESTAT_ADD(rx_threshold_hit);
11877 ESTAT_ADD(dma_readq_full);
11878 ESTAT_ADD(dma_read_prioq_full);
11879 ESTAT_ADD(tx_comp_queue_full);
11881 ESTAT_ADD(ring_set_send_prod_index);
11882 ESTAT_ADD(ring_status_update);
11883 ESTAT_ADD(nic_irqs);
11884 ESTAT_ADD(nic_avoided_irqs);
11885 ESTAT_ADD(nic_tx_threshold_hit);
11887 ESTAT_ADD(mbuf_lwm_thresh_hit);
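/* tg3_get_nstats() - fill the standard rtnl_link_stats64 from the
 * hardware statistics block, using the same "previous snapshot plus
 * live counter" accumulation scheme as tg3_get_estats() above.
 */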
11890 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11892 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11893 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11895 stats->rx_packets = old_stats->rx_packets +
11896 get_stat64(&hw_stats->rx_ucast_packets) +
11897 get_stat64(&hw_stats->rx_mcast_packets) +
11898 get_stat64(&hw_stats->rx_bcast_packets);
11900 stats->tx_packets = old_stats->tx_packets +
11901 get_stat64(&hw_stats->tx_ucast_packets) +
11902 get_stat64(&hw_stats->tx_mcast_packets) +
11903 get_stat64(&hw_stats->tx_bcast_packets);
11905 stats->rx_bytes = old_stats->rx_bytes +
11906 get_stat64(&hw_stats->rx_octets);
11907 stats->tx_bytes = old_stats->tx_bytes +
11908 get_stat64(&hw_stats->tx_octets);
11910 stats->rx_errors = old_stats->rx_errors +
11911 get_stat64(&hw_stats->rx_errors);
11912 stats->tx_errors = old_stats->tx_errors +
11913 get_stat64(&hw_stats->tx_errors) +
11914 get_stat64(&hw_stats->tx_mac_errors) +
11915 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11916 get_stat64(&hw_stats->tx_discards);
11918 stats->multicast = old_stats->multicast +
11919 get_stat64(&hw_stats->rx_mcast_packets);
11920 stats->collisions = old_stats->collisions +
11921 get_stat64(&hw_stats->tx_collisions);
11923 stats->rx_length_errors = old_stats->rx_length_errors +
11924 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11925 get_stat64(&hw_stats->rx_undersize_packets);
11927 stats->rx_frame_errors = old_stats->rx_frame_errors +
11928 get_stat64(&hw_stats->rx_align_errors);
11929 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11930 get_stat64(&hw_stats->tx_discards);
11931 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11932 get_stat64(&hw_stats->tx_carrier_sense_errors);
11934 stats->rx_crc_errors = old_stats->rx_crc_errors +
11935 tg3_calc_crc_errors(tp);
11937 stats->rx_missed_errors = old_stats->rx_missed_errors +
11938 get_stat64(&hw_stats->rx_discards);
11940 stats->rx_dropped = tp->rx_dropped;
11941 stats->tx_dropped = tp->tx_dropped;
11944 static int tg3_get_regs_len(struct net_device *dev)
11946 return TG3_REG_BLK_SIZE;
11949 static void tg3_get_regs(struct net_device *dev,
11950 struct ethtool_regs *regs, void *_p)
11952 struct tg3 *tp = netdev_priv(dev);
11956 memset(_p, 0, TG3_REG_BLK_SIZE);
11958 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11961 tg3_full_lock(tp, 0);
11963 tg3_dump_legacy_regs(tp, (u32 *)_p);
11965 tg3_full_unlock(tp);
11968 static int tg3_get_eeprom_len(struct net_device *dev)
11970 struct tg3 *tp = netdev_priv(dev);
11972 return tp->nvram_size;
11975 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11977 struct tg3 *tp = netdev_priv(dev);
11978 int ret, cpmu_restore = 0;
11980 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11983 if (tg3_flag(tp, NO_NVRAM))
11986 offset = eeprom->offset;
11990 eeprom->magic = TG3_EEPROM_MAGIC;
11992 /* Override clock, link aware and link idle modes */
11993 if (tg3_flag(tp, CPMU_PRESENT)) {
11994 cpmu_val = tr32(TG3_CPMU_CTRL);
11995 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11996 CPMU_CTRL_LINK_IDLE_MODE)) {
11997 tw32(TG3_CPMU_CTRL, cpmu_val &
11998 ~(CPMU_CTRL_LINK_AWARE_MODE |
11999 CPMU_CTRL_LINK_IDLE_MODE));
12003 tg3_override_clk(tp);
12006 /* adjustments to start on required 4 byte boundary */
12007 b_offset = offset & 3;
12008 b_count = 4 - b_offset;
12009 if (b_count > len) {
12010 /* i.e. offset=1 len=2 */
12013 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12016 memcpy(data, ((char *)&val) + b_offset, b_count);
12019 eeprom->len += b_count;
12022 /* read bytes up to the last 4 byte boundary */
12023 pd = &data[eeprom->len];
12024 for (i = 0; i < (len - (len & 3)); i += 4) {
12025 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12032 memcpy(pd + i, &val, 4);
12033 if (need_resched()) {
12034 if (signal_pending(current)) {
12045 /* read last bytes not ending on 4 byte boundary */
12046 pd = &data[eeprom->len];
12048 b_offset = offset + len - b_count;
12049 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12052 memcpy(pd, &val, b_count);
12053 eeprom->len += b_count;
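/* Worked example of the alignment handling above (illustrative only):
 * a request with offset = 5 and len = 10 becomes a head read of the
 * word at offset 4 (b_offset = 1, so 3 bytes are copied), one aligned
 * 4-byte read at offset 8, and a tail read of the word at offset 12
 * from which the low 3 bytes are copied.
 */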
12058 /* Restore clock, link aware and link idle modes */
12059 tg3_restore_clk(tp);
12061 tw32(TG3_CPMU_CTRL, cpmu_val);
12066 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12068 struct tg3 *tp = netdev_priv(dev);
12070 u32 offset, len, b_offset, odd_len;
12072 __be32 start = 0, end;
12074 if (tg3_flag(tp, NO_NVRAM) ||
12075 eeprom->magic != TG3_EEPROM_MAGIC)
12078 offset = eeprom->offset;
12081 if ((b_offset = (offset & 3))) {
12082 /* adjustments to start on required 4 byte boundary */
12083 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12094 /* adjustments to end on required 4 byte boundary */
12096 len = (len + 3) & ~3;
12097 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12103 if (b_offset || odd_len) {
12104 buf = kmalloc(len, GFP_KERNEL);
12108 memcpy(buf, &start, 4);
12110 memcpy(buf+len-4, &end, 4);
12111 memcpy(buf + b_offset, data, eeprom->len);
12114 ret = tg3_nvram_write_block(tp, offset, len, buf);
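/* The write path above is a read-modify-write: when the request is
 * unaligned at either end, the bracketing NVRAM words are read into
 * "start"/"end", merged with the caller's bytes in a bounce buffer,
 * and the whole aligned span is written back in a single
 * tg3_nvram_write_block() call.
 */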
12122 static int tg3_get_link_ksettings(struct net_device *dev,
12123 struct ethtool_link_ksettings *cmd)
12125 struct tg3 *tp = netdev_priv(dev);
12126 u32 supported, advertising;
12128 if (tg3_flag(tp, USE_PHYLIB)) {
12129 struct phy_device *phydev;
12130 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12132 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12133 phy_ethtool_ksettings_get(phydev, cmd);
12138 supported = (SUPPORTED_Autoneg);
12140 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12141 supported |= (SUPPORTED_1000baseT_Half |
12142 SUPPORTED_1000baseT_Full);
12144 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12145 supported |= (SUPPORTED_100baseT_Half |
12146 SUPPORTED_100baseT_Full |
12147 SUPPORTED_10baseT_Half |
12148 SUPPORTED_10baseT_Full |
12150 cmd->base.port = PORT_TP;
12152 supported |= SUPPORTED_FIBRE;
12153 cmd->base.port = PORT_FIBRE;
12155 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12158 advertising = tp->link_config.advertising;
12159 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12160 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12161 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12162 advertising |= ADVERTISED_Pause;
12164 advertising |= ADVERTISED_Pause |
12165 ADVERTISED_Asym_Pause;
12167 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12168 advertising |= ADVERTISED_Asym_Pause;
12171 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12174 if (netif_running(dev) && tp->link_up) {
12175 cmd->base.speed = tp->link_config.active_speed;
12176 cmd->base.duplex = tp->link_config.active_duplex;
12177 ethtool_convert_legacy_u32_to_link_mode(
12178 cmd->link_modes.lp_advertising,
12179 tp->link_config.rmt_adv);
12181 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12182 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12183 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12185 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12188 cmd->base.speed = SPEED_UNKNOWN;
12189 cmd->base.duplex = DUPLEX_UNKNOWN;
12190 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12192 cmd->base.phy_address = tp->phy_addr;
12193 cmd->base.autoneg = tp->link_config.autoneg;
12197 static int tg3_set_link_ksettings(struct net_device *dev,
12198 const struct ethtool_link_ksettings *cmd)
12200 struct tg3 *tp = netdev_priv(dev);
12201 u32 speed = cmd->base.speed;
12204 if (tg3_flag(tp, USE_PHYLIB)) {
12205 struct phy_device *phydev;
12206 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12208 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12209 return phy_ethtool_ksettings_set(phydev, cmd);
12212 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12213 cmd->base.autoneg != AUTONEG_DISABLE)
12216 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12217 cmd->base.duplex != DUPLEX_FULL &&
12218 cmd->base.duplex != DUPLEX_HALF)
12221 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12222 cmd->link_modes.advertising);
12224 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12225 u32 mask = ADVERTISED_Autoneg |
12227 ADVERTISED_Asym_Pause;
12229 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12230 mask |= ADVERTISED_1000baseT_Half |
12231 ADVERTISED_1000baseT_Full;
12233 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12234 mask |= ADVERTISED_100baseT_Half |
12235 ADVERTISED_100baseT_Full |
12236 ADVERTISED_10baseT_Half |
12237 ADVERTISED_10baseT_Full |
12240 mask |= ADVERTISED_FIBRE;
12242 if (advertising & ~mask)
12245 mask &= (ADVERTISED_1000baseT_Half |
12246 ADVERTISED_1000baseT_Full |
12247 ADVERTISED_100baseT_Half |
12248 ADVERTISED_100baseT_Full |
12249 ADVERTISED_10baseT_Half |
12250 ADVERTISED_10baseT_Full);
12252 advertising &= mask;
12254 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12255 if (speed != SPEED_1000)
12258 if (cmd->base.duplex != DUPLEX_FULL)
12261 if (speed != SPEED_100 &&
12267 tg3_full_lock(tp, 0);
12269 tp->link_config.autoneg = cmd->base.autoneg;
12270 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12271 tp->link_config.advertising = (advertising |
12272 ADVERTISED_Autoneg);
12273 tp->link_config.speed = SPEED_UNKNOWN;
12274 tp->link_config.duplex = DUPLEX_UNKNOWN;
12276 tp->link_config.advertising = 0;
12277 tp->link_config.speed = speed;
12278 tp->link_config.duplex = cmd->base.duplex;
12281 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12283 tg3_warn_mgmt_link_flap(tp);
12285 if (netif_running(dev))
12286 tg3_setup_phy(tp, true);
12288 tg3_full_unlock(tp);
12293 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12295 struct tg3 *tp = netdev_priv(dev);
12297 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12298 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12299 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12302 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12304 struct tg3 *tp = netdev_priv(dev);
12306 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12307 wol->supported = WAKE_MAGIC;
12309 wol->supported = 0;
12311 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12312 wol->wolopts = WAKE_MAGIC;
12313 memset(&wol->sopass, 0, sizeof(wol->sopass));
12316 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12318 struct tg3 *tp = netdev_priv(dev);
12319 struct device *dp = &tp->pdev->dev;
12321 if (wol->wolopts & ~WAKE_MAGIC)
12323 if ((wol->wolopts & WAKE_MAGIC) &&
12324 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12327 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12329 if (device_may_wakeup(dp))
12330 tg3_flag_set(tp, WOL_ENABLE);
12332 tg3_flag_clear(tp, WOL_ENABLE);
12337 static u32 tg3_get_msglevel(struct net_device *dev)
12339 struct tg3 *tp = netdev_priv(dev);
12340 return tp->msg_enable;
12343 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12345 struct tg3 *tp = netdev_priv(dev);
12346 tp->msg_enable = value;
12349 static int tg3_nway_reset(struct net_device *dev)
12351 struct tg3 *tp = netdev_priv(dev);
12354 if (!netif_running(dev))
12357 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12360 tg3_warn_mgmt_link_flap(tp);
12362 if (tg3_flag(tp, USE_PHYLIB)) {
12363 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12365 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12369 spin_lock_bh(&tp->lock);
12371 tg3_readphy(tp, MII_BMCR, &bmcr);
12372 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12373 ((bmcr & BMCR_ANENABLE) ||
12374 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12375 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12379 spin_unlock_bh(&tp->lock);
12385 static void tg3_get_ringparam(struct net_device *dev,
12386 struct ethtool_ringparam *ering,
12387 struct kernel_ethtool_ringparam *kernel_ering,
12388 struct netlink_ext_ack *extack)
12390 struct tg3 *tp = netdev_priv(dev);
12392 ering->rx_max_pending = tp->rx_std_ring_mask;
12393 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12394 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12396 ering->rx_jumbo_max_pending = 0;
12398 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12400 ering->rx_pending = tp->rx_pending;
12401 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12402 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12404 ering->rx_jumbo_pending = 0;
12406 ering->tx_pending = tp->napi[0].tx_pending;
12409 static int tg3_set_ringparam(struct net_device *dev,
12410 struct ethtool_ringparam *ering,
12411 struct kernel_ethtool_ringparam *kernel_ering,
12412 struct netlink_ext_ack *extack)
12414 struct tg3 *tp = netdev_priv(dev);
12415 int i, irq_sync = 0, err = 0;
12416 bool reset_phy = false;
12418 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12419 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12420 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12421 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12422 (tg3_flag(tp, TSO_BUG) &&
12423 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
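/* The tx_pending floor above rejects rings too small to hold one
 * maximally-fragmented skb (MAX_SKB_FRAGS descriptors plus header
 * descriptors); presumably the TSO_BUG workaround can consume around
 * three times that while segmenting, hence the 3x margin.
 */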
12426 if (netif_running(dev)) {
12428 tg3_netif_stop(tp);
12432 tg3_full_lock(tp, irq_sync);
12434 tp->rx_pending = ering->rx_pending;
12436 if (tg3_flag(tp, MAX_RXPEND_64) &&
12437 tp->rx_pending > 63)
12438 tp->rx_pending = 63;
12440 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12441 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12443 for (i = 0; i < tp->irq_max; i++)
12444 tp->napi[i].tx_pending = ering->tx_pending;
12446 if (netif_running(dev)) {
12447 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12448 /* Reset PHY to avoid PHY lock up */
12449 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12450 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12451 tg3_asic_rev(tp) == ASIC_REV_5720)
12454 err = tg3_restart_hw(tp, reset_phy);
12456 tg3_netif_start(tp);
12459 tg3_full_unlock(tp);
12461 if (irq_sync && !err)
12467 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12469 struct tg3 *tp = netdev_priv(dev);
12471 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12473 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12474 epause->rx_pause = 1;
12476 epause->rx_pause = 0;
12478 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12479 epause->tx_pause = 1;
12481 epause->tx_pause = 0;
12484 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12486 struct tg3 *tp = netdev_priv(dev);
12488 bool reset_phy = false;
12490 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12491 tg3_warn_mgmt_link_flap(tp);
12493 if (tg3_flag(tp, USE_PHYLIB)) {
12494 struct phy_device *phydev;
12496 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12498 if (!phy_validate_pause(phydev, epause))
12501 tp->link_config.flowctrl = 0;
12502 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12503 if (epause->rx_pause) {
12504 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12506 if (epause->tx_pause) {
12507 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12509 } else if (epause->tx_pause) {
12510 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12513 if (epause->autoneg)
12514 tg3_flag_set(tp, PAUSE_AUTONEG);
12516 tg3_flag_clear(tp, PAUSE_AUTONEG);
12518 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12519 if (phydev->autoneg) {
12520 /* phy_set_asym_pause() will
12521 * renegotiate the link to inform our
12522 * link partner of our flow control
12523 * settings, even if the flow control
12524 * is forced. Let tg3_adjust_link()
12525 * do the final flow control setup.
12530 if (!epause->autoneg)
12531 tg3_setup_flow_control(tp, 0, 0);
12536 if (netif_running(dev)) {
12537 tg3_netif_stop(tp);
12541 tg3_full_lock(tp, irq_sync);
12543 if (epause->autoneg)
12544 tg3_flag_set(tp, PAUSE_AUTONEG);
12546 tg3_flag_clear(tp, PAUSE_AUTONEG);
12547 if (epause->rx_pause)
12548 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12550 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12551 if (epause->tx_pause)
12552 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12554 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12556 if (netif_running(dev)) {
12557 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12558 /* Reset PHY to avoid PHY lock up */
12559 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12560 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12561 tg3_asic_rev(tp) == ASIC_REV_5720)
12564 err = tg3_restart_hw(tp, reset_phy);
12566 tg3_netif_start(tp);
12569 tg3_full_unlock(tp);
12572 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12577 static int tg3_get_sset_count(struct net_device *dev, int sset)
12581 return TG3_NUM_TEST;
12583 return TG3_NUM_STATS;
12585 return -EOPNOTSUPP;
12589 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12590 u32 *rules __always_unused)
12592 struct tg3 *tp = netdev_priv(dev);
12594 if (!tg3_flag(tp, SUPPORT_MSIX))
12595 return -EOPNOTSUPP;
12597 switch (info->cmd) {
12598 case ETHTOOL_GRXRINGS:
12599 if (netif_running(tp->dev))
12600 info->data = tp->rxq_cnt;
12602 info->data = num_online_cpus();
12603 if (info->data > TG3_RSS_MAX_NUM_QS)
12604 info->data = TG3_RSS_MAX_NUM_QS;
12610 return -EOPNOTSUPP;
12614 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12617 struct tg3 *tp = netdev_priv(dev);
12619 if (tg3_flag(tp, SUPPORT_MSIX))
12620 size = TG3_RSS_INDIR_TBL_SIZE;
12625 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12627 struct tg3 *tp = netdev_priv(dev);
12631 *hfunc = ETH_RSS_HASH_TOP;
12635 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12636 indir[i] = tp->rss_ind_tbl[i];
12641 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12644 struct tg3 *tp = netdev_priv(dev);
12647 /* We require at least one supported parameter to be changed and no
12648 * change in any of the unsupported parameters
12651 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12652 return -EOPNOTSUPP;
12657 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12658 tp->rss_ind_tbl[i] = indir[i];
12660 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12663 /* It is legal to write the indirection
12664 * table while the device is running.
12666 tg3_full_lock(tp, 0);
12667 tg3_rss_write_indir_tbl(tp);
12668 tg3_full_unlock(tp);
12673 static void tg3_get_channels(struct net_device *dev,
12674 struct ethtool_channels *channel)
12676 struct tg3 *tp = netdev_priv(dev);
12677 u32 deflt_qs = netif_get_num_default_rss_queues();
12679 channel->max_rx = tp->rxq_max;
12680 channel->max_tx = tp->txq_max;
12682 if (netif_running(dev)) {
12683 channel->rx_count = tp->rxq_cnt;
12684 channel->tx_count = tp->txq_cnt;
12687 channel->rx_count = tp->rxq_req;
12689 channel->rx_count = min(deflt_qs, tp->rxq_max);
12692 channel->tx_count = tp->txq_req;
12694 channel->tx_count = min(deflt_qs, tp->txq_max);
12698 static int tg3_set_channels(struct net_device *dev,
12699 struct ethtool_channels *channel)
12701 struct tg3 *tp = netdev_priv(dev);
12703 if (!tg3_flag(tp, SUPPORT_MSIX))
12704 return -EOPNOTSUPP;
12706 if (channel->rx_count > tp->rxq_max ||
12707 channel->tx_count > tp->txq_max)
12710 tp->rxq_req = channel->rx_count;
12711 tp->txq_req = channel->tx_count;
12713 if (!netif_running(dev))
12718 tg3_carrier_off(tp);
12720 tg3_start(tp, true, false, false);
12725 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12727 switch (stringset) {
12729 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12732 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12735 WARN_ON(1); /* we need a WARN() */
12740 static int tg3_set_phys_id(struct net_device *dev,
12741 enum ethtool_phys_id_state state)
12743 struct tg3 *tp = netdev_priv(dev);
12746 case ETHTOOL_ID_ACTIVE:
12747 return 1; /* cycle on/off once per second */
12749 case ETHTOOL_ID_ON:
12750 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12751 LED_CTRL_1000MBPS_ON |
12752 LED_CTRL_100MBPS_ON |
12753 LED_CTRL_10MBPS_ON |
12754 LED_CTRL_TRAFFIC_OVERRIDE |
12755 LED_CTRL_TRAFFIC_BLINK |
12756 LED_CTRL_TRAFFIC_LED);
12759 case ETHTOOL_ID_OFF:
12760 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12761 LED_CTRL_TRAFFIC_OVERRIDE);
12764 case ETHTOOL_ID_INACTIVE:
12765 tw32(MAC_LED_CTRL, tp->led_ctrl);
12772 static void tg3_get_ethtool_stats(struct net_device *dev,
12773 struct ethtool_stats *estats, u64 *tmp_stats)
12775 struct tg3 *tp = netdev_priv(dev);
12778 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12780 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
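/* tg3_vpd_readblock() - locate and copy out the VPD image, preferring
 * an extended-VPD directory entry in NVRAM when one exists, then the
 * fixed TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window, and finally falling
 * back to the PCI VPD capability via pci_vpd_alloc().
 */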
12783 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12787 u32 offset = 0, len = 0;
12790 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12793 if (magic == TG3_EEPROM_MAGIC) {
12794 for (offset = TG3_NVM_DIR_START;
12795 offset < TG3_NVM_DIR_END;
12796 offset += TG3_NVM_DIRENT_SIZE) {
12797 if (tg3_nvram_read(tp, offset, &val))
12800 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12801 TG3_NVM_DIRTYPE_EXTVPD)
12805 if (offset != TG3_NVM_DIR_END) {
12806 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12807 if (tg3_nvram_read(tp, offset + 4, &offset))
12810 offset = tg3_nvram_logical_addr(tp, offset);
12813 if (!offset || !len) {
12814 offset = TG3_NVM_VPD_OFF;
12815 len = TG3_NVM_VPD_LEN;
12818 buf = kmalloc(len, GFP_KERNEL);
12822 for (i = 0; i < len; i += 4) {
12823 /* The data is in little-endian format in NVRAM.
12824 * Use the big-endian read routines to preserve
12825 * the byte order as it exists in NVRAM.
12827 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12832 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12844 #define NVRAM_TEST_SIZE 0x100
12845 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12846 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12847 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12848 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12849 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12850 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12851 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12852 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12854 static int tg3_test_nvram(struct tg3 *tp)
12858 int i, j, k, err = 0, size;
12861 if (tg3_flag(tp, NO_NVRAM))
12864 if (tg3_nvram_read(tp, 0, &magic) != 0)
12867 if (magic == TG3_EEPROM_MAGIC)
12868 size = NVRAM_TEST_SIZE;
12869 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12870 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12871 TG3_EEPROM_SB_FORMAT_1) {
12872 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12873 case TG3_EEPROM_SB_REVISION_0:
12874 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12876 case TG3_EEPROM_SB_REVISION_2:
12877 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12879 case TG3_EEPROM_SB_REVISION_3:
12880 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12882 case TG3_EEPROM_SB_REVISION_4:
12883 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12885 case TG3_EEPROM_SB_REVISION_5:
12886 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12888 case TG3_EEPROM_SB_REVISION_6:
12889 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12896 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12897 size = NVRAM_SELFBOOT_HW_SIZE;
12901 buf = kmalloc(size, GFP_KERNEL);
12906 for (i = 0, j = 0; i < size; i += 4, j++) {
12907 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12914 /* Selfboot format */
12915 magic = be32_to_cpu(buf[0]);
12916 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12917 TG3_EEPROM_MAGIC_FW) {
12918 u8 *buf8 = (u8 *) buf, csum8 = 0;
12920 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12921 TG3_EEPROM_SB_REVISION_2) {
12922 /* For rev 2, the csum doesn't include the MBA. */
12923 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12925 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12928 for (i = 0; i < size; i++)
12941 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12942 TG3_EEPROM_MAGIC_HW) {
12943 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12944 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12945 u8 *buf8 = (u8 *) buf;
12947 /* Separate the parity bits and the data bytes. */
12948 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12949 if ((i == 0) || (i == 8)) {
12953 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12954 parity[k++] = buf8[i] & msk;
12956 } else if (i == 16) {
12960 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12961 parity[k++] = buf8[i] & msk;
12964 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12965 parity[k++] = buf8[i] & msk;
12968 data[j++] = buf8[i];
12972 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12973 u8 hw8 = hweight8(data[i]);
12975 if ((hw8 & 0x1) && parity[i])
12977 else if (!(hw8 & 0x1) && !parity[i])
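/* Odd-parity check: hweight8() counts the one-bits in a data byte.  A
 * byte with an odd population must have its stored parity bit clear
 * and an even one must have it set, so data plus parity always carry
 * an odd number of ones; either mismatch above fails the test.
 */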
12986 /* Bootstrap checksum at offset 0x10 */
12987 csum = calc_crc((unsigned char *) buf, 0x10);
12988 if (csum != le32_to_cpu(buf[0x10/4]))
12991 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12992 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12993 if (csum != le32_to_cpu(buf[0xfc/4]))
12998 buf = tg3_vpd_readblock(tp, &len);
13002 err = pci_vpd_check_csum(buf, len);
13003 /* go on if no checksum found */
13011 #define TG3_SERDES_TIMEOUT_SEC 2
13012 #define TG3_COPPER_TIMEOUT_SEC 6
13014 static int tg3_test_link(struct tg3 *tp)
13018 if (!netif_running(tp->dev))
13021 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13022 max = TG3_SERDES_TIMEOUT_SEC;
13024 max = TG3_COPPER_TIMEOUT_SEC;
13026 for (i = 0; i < max; i++) {
13030 if (msleep_interruptible(1000))
13037 /* Only test the commonly used registers */
13038 static int tg3_test_registers(struct tg3 *tp)
13040 int i, is_5705, is_5750;
13041 u32 offset, read_mask, write_mask, val, save_val, read_val;
13045 #define TG3_FL_5705 0x1
13046 #define TG3_FL_NOT_5705 0x2
13047 #define TG3_FL_NOT_5788 0x4
13048 #define TG3_FL_NOT_5750 0x8
13052 /* MAC Control Registers */
13053 { MAC_MODE, TG3_FL_NOT_5705,
13054 0x00000000, 0x00ef6f8c },
13055 { MAC_MODE, TG3_FL_5705,
13056 0x00000000, 0x01ef6b8c },
13057 { MAC_STATUS, TG3_FL_NOT_5705,
13058 0x03800107, 0x00000000 },
13059 { MAC_STATUS, TG3_FL_5705,
13060 0x03800100, 0x00000000 },
13061 { MAC_ADDR_0_HIGH, 0x0000,
13062 0x00000000, 0x0000ffff },
13063 { MAC_ADDR_0_LOW, 0x0000,
13064 0x00000000, 0xffffffff },
13065 { MAC_RX_MTU_SIZE, 0x0000,
13066 0x00000000, 0x0000ffff },
13067 { MAC_TX_MODE, 0x0000,
13068 0x00000000, 0x00000070 },
13069 { MAC_TX_LENGTHS, 0x0000,
13070 0x00000000, 0x00003fff },
13071 { MAC_RX_MODE, TG3_FL_NOT_5705,
13072 0x00000000, 0x000007fc },
13073 { MAC_RX_MODE, TG3_FL_5705,
13074 0x00000000, 0x000007dc },
13075 { MAC_HASH_REG_0, 0x0000,
13076 0x00000000, 0xffffffff },
13077 { MAC_HASH_REG_1, 0x0000,
13078 0x00000000, 0xffffffff },
13079 { MAC_HASH_REG_2, 0x0000,
13080 0x00000000, 0xffffffff },
13081 { MAC_HASH_REG_3, 0x0000,
13082 0x00000000, 0xffffffff },
13084 /* Receive Data and Receive BD Initiator Control Registers. */
13085 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13086 0x00000000, 0xffffffff },
13087 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13088 0x00000000, 0xffffffff },
13089 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13090 0x00000000, 0x00000003 },
13091 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13092 0x00000000, 0xffffffff },
13093 { RCVDBDI_STD_BD+0, 0x0000,
13094 0x00000000, 0xffffffff },
13095 { RCVDBDI_STD_BD+4, 0x0000,
13096 0x00000000, 0xffffffff },
13097 { RCVDBDI_STD_BD+8, 0x0000,
13098 0x00000000, 0xffff0002 },
13099 { RCVDBDI_STD_BD+0xc, 0x0000,
13100 0x00000000, 0xffffffff },
13102 /* Receive BD Initiator Control Registers. */
13103 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13104 0x00000000, 0xffffffff },
13105 { RCVBDI_STD_THRESH, TG3_FL_5705,
13106 0x00000000, 0x000003ff },
13107 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13108 0x00000000, 0xffffffff },
13110 /* Host Coalescing Control Registers. */
13111 { HOSTCC_MODE, TG3_FL_NOT_5705,
13112 0x00000000, 0x00000004 },
13113 { HOSTCC_MODE, TG3_FL_5705,
13114 0x00000000, 0x000000f6 },
13115 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13116 0x00000000, 0xffffffff },
13117 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13118 0x00000000, 0x000003ff },
13119 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13120 0x00000000, 0xffffffff },
13121 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13122 0x00000000, 0x000003ff },
13123 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13124 0x00000000, 0xffffffff },
13125 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13126 0x00000000, 0x000000ff },
13127 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13128 0x00000000, 0xffffffff },
13129 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13130 0x00000000, 0x000000ff },
13131 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13132 0x00000000, 0xffffffff },
13133 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13134 0x00000000, 0xffffffff },
13135 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13136 0x00000000, 0xffffffff },
13137 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13138 0x00000000, 0x000000ff },
13139 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13140 0x00000000, 0xffffffff },
13141 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13142 0x00000000, 0x000000ff },
13143 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13144 0x00000000, 0xffffffff },
13145 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13146 0x00000000, 0xffffffff },
13147 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13148 0x00000000, 0xffffffff },
13149 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13150 0x00000000, 0xffffffff },
13151 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13152 0x00000000, 0xffffffff },
13153 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13154 0xffffffff, 0x00000000 },
13155 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13156 0xffffffff, 0x00000000 },
13158 /* Buffer Manager Control Registers. */
13159 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13160 0x00000000, 0x007fff80 },
13161 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13162 0x00000000, 0x007fffff },
13163 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13164 0x00000000, 0x0000003f },
13165 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13166 0x00000000, 0x000001ff },
13167 { BUFMGR_MB_HIGH_WATER, 0x0000,
13168 0x00000000, 0x000001ff },
13169 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13170 0xffffffff, 0x00000000 },
13171 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13172 0xffffffff, 0x00000000 },
13174 /* Mailbox Registers */
13175 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13176 0x00000000, 0x000001ff },
13177 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13178 0x00000000, 0x000001ff },
13179 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13180 0x00000000, 0x000007ff },
13181 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13182 0x00000000, 0x000001ff },
13184 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13187 is_5705 = is_5750 = 0;
13188 if (tg3_flag(tp, 5705_PLUS)) {
13190 if (tg3_flag(tp, 5750_PLUS))
13194 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13195 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13198 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13201 if (tg3_flag(tp, IS_5788) &&
13202 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13205 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13208 offset = (u32) reg_tbl[i].offset;
13209 read_mask = reg_tbl[i].read_mask;
13210 write_mask = reg_tbl[i].write_mask;
13212 /* Save the original register content */
13213 save_val = tr32(offset);
13215 /* Determine the read-only value. */
13216 read_val = save_val & read_mask;
13218 /* Write zero to the register, then make sure the read-only bits
13219 * are not changed and the read/write bits are all zeros.
13223 val = tr32(offset);
13225 /* Test the read-only and read/write bits. */
13226 if (((val & read_mask) != read_val) || (val & write_mask))
13229 /* Write ones to all the bits defined by RdMask and WrMask, then
13230 * make sure the read-only bits are not changed and the
13231 * read/write bits are all ones.
13233 tw32(offset, read_mask | write_mask);
13235 val = tr32(offset);
13237 /* Test the read-only bits. */
13238 if ((val & read_mask) != read_val)
13241 /* Test the read/write bits. */
13242 if ((val & write_mask) != write_mask)
13245 tw32(offset, save_val);
13251 if (netif_msg_hw(tp))
13252 netdev_err(tp->dev,
13253 "Register test failed at offset %x\n", offset);
13254 tw32(offset, save_val);
13258 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13260 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
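/* The three patterns cover complementary failure modes: all-zeros and
 * all-ones catch stuck-at bits, while 0xaa55a55a alternates adjacent
 * bits to catch shorted or coupled cells.
 */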
13264 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13265 for (j = 0; j < len; j += 4) {
13268 tg3_write_mem(tp, offset + j, test_pattern[i]);
13269 tg3_read_mem(tp, offset + j, &val);
13270 if (val != test_pattern[i])
13277 static int tg3_test_memory(struct tg3 *tp)
13279 static struct mem_entry {
13282 } mem_tbl_570x[] = {
13283 { 0x00000000, 0x00b50},
13284 { 0x00002000, 0x1c000},
13285 { 0xffffffff, 0x00000}
13286 }, mem_tbl_5705[] = {
13287 { 0x00000100, 0x0000c},
13288 { 0x00000200, 0x00008},
13289 { 0x00004000, 0x00800},
13290 { 0x00006000, 0x01000},
13291 { 0x00008000, 0x02000},
13292 { 0x00010000, 0x0e000},
13293 { 0xffffffff, 0x00000}
13294 }, mem_tbl_5755[] = {
13295 { 0x00000200, 0x00008},
13296 { 0x00004000, 0x00800},
13297 { 0x00006000, 0x00800},
13298 { 0x00008000, 0x02000},
13299 { 0x00010000, 0x0c000},
13300 { 0xffffffff, 0x00000}
13301 }, mem_tbl_5906[] = {
13302 { 0x00000200, 0x00008},
13303 { 0x00004000, 0x00400},
13304 { 0x00006000, 0x00400},
13305 { 0x00008000, 0x01000},
13306 { 0x00010000, 0x01000},
13307 { 0xffffffff, 0x00000}
13308 }, mem_tbl_5717[] = {
13309 { 0x00000200, 0x00008},
13310 { 0x00010000, 0x0a000},
13311 { 0x00020000, 0x13c00},
13312 { 0xffffffff, 0x00000}
13313 }, mem_tbl_57765[] = {
13314 { 0x00000200, 0x00008},
13315 { 0x00004000, 0x00800},
13316 { 0x00006000, 0x09800},
13317 { 0x00010000, 0x0a000},
13318 { 0xffffffff, 0x00000}
13320 struct mem_entry *mem_tbl;
13324 if (tg3_flag(tp, 5717_PLUS))
13325 mem_tbl = mem_tbl_5717;
13326 else if (tg3_flag(tp, 57765_CLASS) ||
13327 tg3_asic_rev(tp) == ASIC_REV_5762)
13328 mem_tbl = mem_tbl_57765;
13329 else if (tg3_flag(tp, 5755_PLUS))
13330 mem_tbl = mem_tbl_5755;
13331 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13332 mem_tbl = mem_tbl_5906;
13333 else if (tg3_flag(tp, 5705_PLUS))
13334 mem_tbl = mem_tbl_5705;
13336 mem_tbl = mem_tbl_570x;
13338 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13339 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13347 #define TG3_TSO_MSS 500
13349 #define TG3_TSO_IP_HDR_LEN 20
13350 #define TG3_TSO_TCP_HDR_LEN 20
13351 #define TG3_TSO_TCP_OPT_LEN 12
13353 static const u8 tg3_tso_header[] = {
13355 0x45, 0x00, 0x00, 0x00,
13356 0x00, 0x00, 0x40, 0x00,
13357 0x40, 0x06, 0x00, 0x00,
13358 0x0a, 0x00, 0x00, 0x01,
13359 0x0a, 0x00, 0x00, 0x02,
13360 0x0d, 0x00, 0xe0, 0x00,
13361 0x00, 0x00, 0x01, 0x00,
13362 0x00, 0x00, 0x02, 0x00,
13363 0x80, 0x10, 0x10, 0x00,
13364 0x14, 0x09, 0x00, 0x00,
13365 0x01, 0x01, 0x08, 0x0a,
13366 0x11, 0x11, 0x11, 0x11,
13367 0x11, 0x11, 0x11, 0x11,
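/* Layout of tg3_tso_header, matching the TG3_TSO_* sizes above: a
 * 20-byte IPv4 header (version/IHL 0x45, protocol 0x06 = TCP, source
 * 10.0.0.1, destination 10.0.0.2) followed by a 20-byte TCP header
 * (data offset 8 words, ACK set) and 12 bytes of options (NOP, NOP,
 * timestamp).
 */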
13370 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13372 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13373 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13375 struct sk_buff *skb;
13376 u8 *tx_data, *rx_data;
13378 int num_pkts, tx_len, rx_len, i, err;
13379 struct tg3_rx_buffer_desc *desc;
13380 struct tg3_napi *tnapi, *rnapi;
13381 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13383 tnapi = &tp->napi[0];
13384 rnapi = &tp->napi[0];
13385 if (tp->irq_cnt > 1) {
13386 if (tg3_flag(tp, ENABLE_RSS))
13387 rnapi = &tp->napi[1];
13388 if (tg3_flag(tp, ENABLE_TSS))
13389 tnapi = &tp->napi[1];
13391 coal_now = tnapi->coal_now | rnapi->coal_now;
13396 skb = netdev_alloc_skb(tp->dev, tx_len);
13400 tx_data = skb_put(skb, tx_len);
13401 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13402 memset(tx_data + ETH_ALEN, 0x0, 8);
13404 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13406 if (tso_loopback) {
13407 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13409 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13410 TG3_TSO_TCP_OPT_LEN;
13412 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13413 sizeof(tg3_tso_header));
13416 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13417 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13419 /* Set the total length field in the IP header */
13420 iph->tot_len = htons((u16)(mss + hdr_len));
13422 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13423 TXD_FLAG_CPU_POST_DMA);
13425 if (tg3_flag(tp, HW_TSO_1) ||
13426 tg3_flag(tp, HW_TSO_2) ||
13427 tg3_flag(tp, HW_TSO_3)) {
13429 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13430 th = (struct tcphdr *)&tx_data[val];
13433 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13435 if (tg3_flag(tp, HW_TSO_3)) {
13436 mss |= (hdr_len & 0xc) << 12;
13437 if (hdr_len & 0x10)
13438 base_flags |= 0x00000010;
13439 base_flags |= (hdr_len & 0x3e0) << 5;
13440 } else if (tg3_flag(tp, HW_TSO_2))
13441 mss |= hdr_len << 9;
13442 else if (tg3_flag(tp, HW_TSO_1) ||
13443 tg3_asic_rev(tp) == ASIC_REV_5705) {
13444 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13446 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13449 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13452 data_off = ETH_HLEN;
13454 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13455 tx_len > VLAN_ETH_FRAME_LEN)
13456 base_flags |= TXD_FLAG_JMB_PKT;
13459 for (i = data_off; i < tx_len; i++)
13460 tx_data[i] = (u8) (i & 0xff);
13462 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13463 if (dma_mapping_error(&tp->pdev->dev, map)) {
13464 dev_kfree_skb(skb);
13468 val = tnapi->tx_prod;
13469 tnapi->tx_buffers[val].skb = skb;
13470 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13472 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13477 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13479 budget = tg3_tx_avail(tnapi);
13480 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13481 base_flags | TXD_FLAG_END, mss, 0)) {
13482 tnapi->tx_buffers[val].skb = NULL;
13483 dev_kfree_skb(skb);
13489 /* Sync BD data before updating mailbox */
13492 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13493 tr32_mailbox(tnapi->prodmbox);
13497 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13498 for (i = 0; i < 35; i++) {
13499 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13504 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13505 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13506 if ((tx_idx == tnapi->tx_prod) &&
13507 (rx_idx == (rx_start_idx + num_pkts)))
13511 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13512 dev_kfree_skb(skb);
13514 if (tx_idx != tnapi->tx_prod)
13517 if (rx_idx != rx_start_idx + num_pkts)
13521 while (rx_idx != rx_start_idx) {
13522 desc = &rnapi->rx_rcb[rx_start_idx++];
13523 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13524 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13526 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13527 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13530 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13533 if (!tso_loopback) {
13534 if (rx_len != tx_len)
13537 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13538 if (opaque_key != RXD_OPAQUE_RING_STD)
13541 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13544 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13545 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13546 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13550 if (opaque_key == RXD_OPAQUE_RING_STD) {
13551 rx_data = tpr->rx_std_buffers[desc_idx].data;
13552 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13554 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13555 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13556 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13561 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13564 rx_data += TG3_RX_OFFSET(tp);
13565 for (i = data_off; i < rx_len; i++, val++) {
13566 if (*(rx_data + i) != (u8) (val & 0xff))
13573 /* tg3_free_rings will unmap and free the rx_data */
13578 #define TG3_STD_LOOPBACK_FAILED 1
13579 #define TG3_JMB_LOOPBACK_FAILED 2
13580 #define TG3_TSO_LOOPBACK_FAILED 4
13581 #define TG3_LOOPBACK_FAILED \
13582 (TG3_STD_LOOPBACK_FAILED | \
13583 TG3_JMB_LOOPBACK_FAILED | \
13584 TG3_TSO_LOOPBACK_FAILED)
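/* Each TG3_*_LOOPBACK_FAILED bit records one packet variant (standard,
 * jumbo, TSO), so a single u64 slot in data[] reports all outcomes of
 * a given loopback mode at once.
 */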
13586 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13590 u32 jmb_pkt_sz = 9000;
13593 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13595 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13596 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13598 if (!netif_running(tp->dev)) {
13599 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13600 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13602 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13606 err = tg3_reset_hw(tp, true);
13608 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13609 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13611 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13615 if (tg3_flag(tp, ENABLE_RSS)) {
13618 /* Reroute all rx packets to the 1st queue */
13619 for (i = MAC_RSS_INDIR_TBL_0;
13620 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13624 /* HW errata - mac loopback fails in some cases on 5780.
13625 * Normal traffic and PHY loopback are not affected by
13626 * errata. Also, the MAC loopback test is deprecated for
13627 * all newer ASIC revisions.
13629 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13630 !tg3_flag(tp, CPMU_PRESENT)) {
13631 tg3_mac_loopback(tp, true);
13633 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13634 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13636 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13637 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13638 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13640 tg3_mac_loopback(tp, false);
13643 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13644 !tg3_flag(tp, USE_PHYLIB)) {
13647 tg3_phy_lpbk_set(tp, 0, false);
13649 /* Wait for link */
13650 for (i = 0; i < 100; i++) {
13651 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13656 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13657 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13658 if (tg3_flag(tp, TSO_CAPABLE) &&
13659 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13660 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13661 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13662 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13663 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13666 tg3_phy_lpbk_set(tp, 0, true);
13668 /* All link indications report up, but the hardware
13669 * isn't really ready for about 20 msec. Double it to be sure.
13674 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13675 data[TG3_EXT_LOOPB_TEST] |=
13676 TG3_STD_LOOPBACK_FAILED;
13677 if (tg3_flag(tp, TSO_CAPABLE) &&
13678 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13679 data[TG3_EXT_LOOPB_TEST] |=
13680 TG3_TSO_LOOPBACK_FAILED;
13681 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13682 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13683 data[TG3_EXT_LOOPB_TEST] |=
13684 TG3_JMB_LOOPBACK_FAILED;
13687 /* Re-enable gphy autopowerdown. */
13688 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13689 tg3_phy_toggle_apd(tp, true);
13692 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13693 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13696 tp->phy_flags |= eee_cap;
13701 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13704 struct tg3 *tp = netdev_priv(dev);
13705 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13707 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13708 if (tg3_power_up(tp)) {
13709 etest->flags |= ETH_TEST_FL_FAILED;
13710 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13713 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13716 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13718 if (tg3_test_nvram(tp) != 0) {
13719 etest->flags |= ETH_TEST_FL_FAILED;
13720 data[TG3_NVRAM_TEST] = 1;
13722 if (!doextlpbk && tg3_test_link(tp)) {
13723 etest->flags |= ETH_TEST_FL_FAILED;
13724 data[TG3_LINK_TEST] = 1;
13726 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13727 int err, err2 = 0, irq_sync = 0;
13729 if (netif_running(dev)) {
13731 tg3_netif_stop(tp);
13735 tg3_full_lock(tp, irq_sync);
13736 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13737 err = tg3_nvram_lock(tp);
13738 tg3_halt_cpu(tp, RX_CPU_BASE);
13739 if (!tg3_flag(tp, 5705_PLUS))
13740 tg3_halt_cpu(tp, TX_CPU_BASE);
13742 tg3_nvram_unlock(tp);
13744 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13747 if (tg3_test_registers(tp) != 0) {
13748 etest->flags |= ETH_TEST_FL_FAILED;
13749 data[TG3_REGISTER_TEST] = 1;
13752 if (tg3_test_memory(tp) != 0) {
13753 etest->flags |= ETH_TEST_FL_FAILED;
13754 data[TG3_MEMORY_TEST] = 1;
13758 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13760 if (tg3_test_loopback(tp, data, doextlpbk))
13761 etest->flags |= ETH_TEST_FL_FAILED;
13763 tg3_full_unlock(tp);
13765 if (tg3_test_interrupt(tp) != 0) {
13766 etest->flags |= ETH_TEST_FL_FAILED;
13767 data[TG3_INTERRUPT_TEST] = 1;
13770 tg3_full_lock(tp, 0);
13772 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13773 if (netif_running(dev)) {
13774 tg3_flag_set(tp, INIT_COMPLETE);
13775 err2 = tg3_restart_hw(tp, true);
13777 tg3_netif_start(tp);
13780 tg3_full_unlock(tp);
13782 if (irq_sync && !err2)
13785 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13786 tg3_power_down_prepare(tp);
13790 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13792 struct tg3 *tp = netdev_priv(dev);
13793 struct hwtstamp_config stmpconf;
13795 if (!tg3_flag(tp, PTP_CAPABLE))
13796 return -EOPNOTSUPP;
13798 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13801 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13802 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13805 switch (stmpconf.rx_filter) {
13806 case HWTSTAMP_FILTER_NONE:
13809 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13810 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13811 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13813 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13814 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13815 TG3_RX_PTP_CTL_SYNC_EVNT;
13817 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13818 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13819 TG3_RX_PTP_CTL_DELAY_REQ;
13821 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13822 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13823 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13825 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13826 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13827 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13829 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13830 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13831 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13833 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13834 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13835 TG3_RX_PTP_CTL_SYNC_EVNT;
13837 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13838 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13839 TG3_RX_PTP_CTL_SYNC_EVNT;
13841 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13842 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13843 TG3_RX_PTP_CTL_SYNC_EVNT;
13845 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13846 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13847 TG3_RX_PTP_CTL_DELAY_REQ;
13849 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13850 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13851 TG3_RX_PTP_CTL_DELAY_REQ;
13853 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13854 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13855 TG3_RX_PTP_CTL_DELAY_REQ;
13861 if (netif_running(dev) && tp->rxptpctl)
13862 tw32(TG3_RX_PTP_CTL,
13863 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13865 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13866 tg3_flag_set(tp, TX_TSTAMP_EN);
13868 tg3_flag_clear(tp, TX_TSTAMP_EN);
13870 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
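/* Minimal userspace sketch of exercising the SIOCSHWTSTAMP path above
 * (illustrative only: error handling trimmed, interface "eth0" and the
 * usual <linux/net_tstamp.h>/<linux/sockios.h> headers assumed):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success the driver has programmed TG3_RX_PTP_CTL and set or
 * cleared the TX_TSTAMP_EN flag as shown above.
 */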
13874 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13876 struct tg3 *tp = netdev_priv(dev);
13877 struct hwtstamp_config stmpconf;
13879 if (!tg3_flag(tp, PTP_CAPABLE))
13880 return -EOPNOTSUPP;
13882 stmpconf.flags = 0;
13883 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13884 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13886 switch (tp->rxptpctl) {
13888 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13890 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13891 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13893 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13894 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13896 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13897 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13899 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13900 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13902 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13903 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13905 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13906 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13908 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13909 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13911 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13912 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13914 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13915 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13917 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13918 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13920 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13921 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13923 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13924 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13931 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13935 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13937 struct mii_ioctl_data *data = if_mii(ifr);
13938 struct tg3 *tp = netdev_priv(dev);
13941 if (tg3_flag(tp, USE_PHYLIB)) {
13942 struct phy_device *phydev;
13943 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13945 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13946 return phy_mii_ioctl(phydev, ifr, cmd);
13951 data->phy_id = tp->phy_addr;
13954 case SIOCGMIIREG: {
13957 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13958 break; /* We have no PHY */
13960 if (!netif_running(dev))
13963 spin_lock_bh(&tp->lock);
13964 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13965 data->reg_num & 0x1f, &mii_regval);
13966 spin_unlock_bh(&tp->lock);
13968 data->val_out = mii_regval;
13974 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13975 break; /* We have no PHY */
13977 if (!netif_running(dev))
13980 spin_lock_bh(&tp->lock);
13981 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13982 data->reg_num & 0x1f, data->val_in);
13983 spin_unlock_bh(&tp->lock);
13987 case SIOCSHWTSTAMP:
13988 return tg3_hwtstamp_set(dev, ifr);
13990 case SIOCGHWTSTAMP:
13991 return tg3_hwtstamp_get(dev, ifr);
13997 return -EOPNOTSUPP;
14000 static int tg3_get_coalesce(struct net_device *dev,
14001 struct ethtool_coalesce *ec,
14002 struct kernel_ethtool_coalesce *kernel_coal,
14003 struct netlink_ext_ack *extack)
14005 struct tg3 *tp = netdev_priv(dev);
14007 memcpy(ec, &tp->coal, sizeof(*ec));
static int tg3_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (!ec->rx_coalesce_usecs) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (!ec->tx_coalesce_usecs) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh		= tg3_get_rxfh,
	.set_rxfh		= tg3_set_rxfh,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
	.get_link_ksettings	= tg3_get_link_ksettings,
	.set_link_ksettings	= tg3_set_link_ksettings,
};
static void tg3_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;
		spin_unlock_bh(&tp->lock);
		return;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
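
/* Raising the MTU past the standard Ethernet payload switches the chip
 * onto the jumbo producer ring; on 5780-class parts the firmware cannot
 * do TSO and jumbo frames at the same time, so TSO capability is toggled
 * along with the ring.
 */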
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_eth_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
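
/* Example of the wrap-around probe below: on a 64 KB EEPROM, reads at
 * 0x10, 0x20, ... keep returning ordinary data until the offset wraps at
 * 0x10000 and the validation signature from offset 0 comes back, so
 * nvram_size ends up as 0x10000.
 */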
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing. We want to operate on the
			 * 16-bit value at offset 0xf2. The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses. This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU. The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
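
/* Decode the 5752-style page-size field of NVRAM_CFG1.  The 264- and
 * 528-byte entries correspond to Atmel DataFlash parts whose pages carry
 * 8 or 16 spare bytes on top of a 256- or 512-byte payload.
 */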
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
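
/* On the 5762 the Macronix flash size is auto-sensed: the device-ID field
 * of NVRAM_AUTOSENSE_STATUS selects a power of two that is then scaled up
 * to bytes, i.e. a device ID of n yields a 2^n MB part (taking
 * AUTOSENSE_SIZE_IN_MB as the MB-to-byte shift).
 */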
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
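
/* Fallback PHY ID table for boards whose NVRAM carries no usable
 * signature: the PCI subsystem vendor/device pair identifies the board
 * and supplies a hard-coded PHY ID (an ID of 0 marks a serdes/fiber
 * board with no copper PHY).
 */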
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
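
/* Read one OTP word via the APE mailbox: take the NVRAM lock, kick off a
 * read command, then poll the status register (up to roughly a
 * millisecond) for completion before fetching the data word.
 */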
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip. The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane. If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature? Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (i < 0)
		goto partno;

	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
		goto partno;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
	if (i < 0)
		goto partno;

	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
	if (i < 0)
		goto out_not_found;

	if (len > TG3_BPN_SIZE)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0x00000000)
		return 0;

	return 1;
}

static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
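
/* Assemble tp->fw_ver: any VPD-derived version written earlier by
 * tg3_read_vpd() is kept, then the bootcode version (plain, selfboot or
 * hardware-selfboot format) and finally any management firmware version
 * (NCSI/DASH/SMASH or ASF) is appended.
 */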
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
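
/* PCI host bridges known to reorder posted memory writes.  If the tg3
 * device sits behind one of these, register writes must be flushed
 * explicitly (e.g. via a read-back) to keep mailbox ordering intact.
 */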
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
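
/* Derive the chip revision ID.  Newer devices report
 * ASIC_REV_USE_PROD_ID_REG in the misc host control register and the real
 * revision must instead be read from a generation-specific product ID
 * register; this helper also derives the family flags (5717_PLUS,
 * 57765_CLASS, 5755_PLUS, ...) used throughout the driver.
 */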
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
16126 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16129 u32 pci_state_reg, grc_misc_cfg;
16134 /* Force memory write invalidate off. If we leave it on,
16135 * then on 5700_BX chips we have to enable a workaround.
16136 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16137 * to match the cacheline size. The Broadcom driver has this
16138 * workaround but turns MWI off at all times, so it never uses
16139 * it. This seems to suggest that the workaround is insufficient.
16140 */
16141 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16142 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16143 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16145 /* Important! -- Make sure register accesses are byteswapped
16146 * correctly. Also, for those chips that require it, make
16147 * sure that indirect register accesses are enabled before
16148 * the first operation.
16149 */
16150 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16151 &misc_ctrl_reg);
16152 tp->misc_host_ctrl |= (misc_ctrl_reg &
16153 MISC_HOST_CTRL_CHIPREV);
16154 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16155 tp->misc_host_ctrl);
16157 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16159 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16160 * we need to disable memory and use config. cycles
16161 * only to access all registers. The 5702/03 chips
16162 * can mistakenly decode the special cycles from the
16163 * ICH chipsets as memory write cycles, causing corruption
16164 * of register and memory space. Only certain ICH bridges
16165 * will drive special cycles with non-zero data during the
16166 * address phase which can fall within the 5703's address
16167 * range. This is not an ICH bug as the PCI spec allows
16168 * non-zero address during special cycles. However, only
16169 * these ICH bridges are known to drive non-zero addresses
16170 * during special cycles.
16171 *
16172 * Since special cycles do not cross PCI bridges, we only
16173 * enable this workaround if the 5703 is on the secondary
16174 * bus of these ICH bridges.
16175 */
16176 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16177 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16178 static struct tg3_dev_id {
16179 u32 vendor;
16180 u32 device;
16181 u32 rev;
16182 } ich_chipsets[] = {
16183 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16184 PCI_ANY_ID },
16185 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16186 PCI_ANY_ID },
16187 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16188 0xa },
16189 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16190 PCI_ANY_ID },
16191 { },
16192 };
16193 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16194 struct pci_dev *bridge = NULL;
16196 while (pci_id->vendor != 0) {
16197 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16198 bridge);
16199 if (!bridge) {
16200 pci_id++;
16201 continue;
16202 }
16203 if (pci_id->rev != PCI_ANY_ID) {
16204 if (bridge->revision > pci_id->rev)
16205 continue;
16206 }
16207 if (bridge->subordinate &&
16208 (bridge->subordinate->number ==
16209 tp->pdev->bus->number)) {
16210 tg3_flag_set(tp, ICH_WORKAROUND);
16211 pci_dev_put(bridge);
16212 break;
16213 }
16214 }
16215 }
16217 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16218 static struct tg3_dev_id {
16219 u32 vendor;
16220 u32 device;
16221 } bridge_chipsets[] = {
16222 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16223 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16224 { },
16225 };
16226 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16227 struct pci_dev *bridge = NULL;
16229 while (pci_id->vendor != 0) {
16230 bridge = pci_get_device(pci_id->vendor,
16231 pci_id->device,
16232 bridge);
16233 if (!bridge) {
16234 pci_id++;
16235 continue;
16236 }
16237 if (bridge->subordinate &&
16238 (bridge->subordinate->number <=
16239 tp->pdev->bus->number) &&
16240 (bridge->subordinate->busn_res.end >=
16241 tp->pdev->bus->number)) {
16242 tg3_flag_set(tp, 5701_DMA_BUG);
16243 pci_dev_put(bridge);
16244 break;
16245 }
16246 }
16247 }
16249 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16250 * DMA addresses > 40-bit. This bridge may have additional
16251 * 57xx devices behind it in some 4-port NIC designs for example.
16252 * Any tg3 device found behind the bridge will also need the 40-bit
16253 * DMA workaround.
16254 */
16255 if (tg3_flag(tp, 5780_CLASS)) {
16256 tg3_flag_set(tp, 40BIT_DMA_BUG);
16257 tp->msi_cap = tp->pdev->msi_cap;
16258 } else {
16259 struct pci_dev *bridge = NULL;

16261 do {
16262 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16263 PCI_DEVICE_ID_SERVERWORKS_EPB,
16264 bridge);
16265 if (bridge && bridge->subordinate &&
16266 (bridge->subordinate->number <=
16267 tp->pdev->bus->number) &&
16268 (bridge->subordinate->busn_res.end >=
16269 tp->pdev->bus->number)) {
16270 tg3_flag_set(tp, 40BIT_DMA_BUG);
16271 pci_dev_put(bridge);
16272 break;
16273 }
16274 } while (bridge);
16275 }

16277 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16278 tg3_asic_rev(tp) == ASIC_REV_5714)
16279 tp->pdev_peer = tg3_find_peer(tp);
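/* On dual-port 5704/5714 packages the two ports are separate PCI
 * functions; tg3_find_peer() locates the sibling device so quirks
 * that depend on both ports (such as the MSI erratum check further
 * down) can tell whether a peer is present.
 */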
16281 /* Determine TSO capabilities */
16282 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16283 ; /* Do nothing. HW bug. */
16284 else if (tg3_flag(tp, 57765_PLUS))
16285 tg3_flag_set(tp, HW_TSO_3);
16286 else if (tg3_flag(tp, 5755_PLUS) ||
16287 tg3_asic_rev(tp) == ASIC_REV_5906)
16288 tg3_flag_set(tp, HW_TSO_2);
16289 else if (tg3_flag(tp, 5750_PLUS)) {
16290 tg3_flag_set(tp, HW_TSO_1);
16291 tg3_flag_set(tp, TSO_BUG);
16292 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16293 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16294 tg3_flag_clear(tp, TSO_BUG);
16295 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16296 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16297 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16298 tg3_flag_set(tp, FW_TSO);
16299 tg3_flag_set(tp, TSO_BUG);
16300 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16301 tp->fw_needed = FIRMWARE_TG3TSO5;
16302 else
16303 tp->fw_needed = FIRMWARE_TG3TSO;
16304 }
16306 /* Selectively allow TSO based on operating conditions */
16307 if (tg3_flag(tp, HW_TSO_1) ||
16308 tg3_flag(tp, HW_TSO_2) ||
16309 tg3_flag(tp, HW_TSO_3) ||
16310 tg3_flag(tp, FW_TSO)) {
16311 /* For firmware TSO, assume ASF is disabled.
16312 * We'll disable TSO later if we discover ASF
16313 * is enabled in tg3_get_eeprom_hw_cfg().
16315 tg3_flag_set(tp, TSO_CAPABLE);
16316 } else {
16317 tg3_flag_clear(tp, TSO_CAPABLE);
16318 tg3_flag_clear(tp, TSO_BUG);
16319 tp->fw_needed = NULL;
16320 }
16322 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16323 tp->fw_needed = FIRMWARE_TG3;
16325 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16326 tp->fw_needed = FIRMWARE_TG357766;
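/* Summary of the TSO selection above: 57765_PLUS parts use the third
 * generation hardware TSO engine (HW_TSO_3), 5755_PLUS and the 5906
 * the second (HW_TSO_2), other 5750_PLUS parts the first (HW_TSO_1,
 * with an errata workaround), and the remaining chips fall back to
 * firmware-based TSO (FW_TSO) loaded from the files named above.
 */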
16328 tp->irq_max = 1;

16330 if (tg3_flag(tp, 5750_PLUS)) {
16331 tg3_flag_set(tp, SUPPORT_MSI);
16332 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16333 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16334 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16335 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16336 tp->pdev_peer == tp->pdev))
16337 tg3_flag_clear(tp, SUPPORT_MSI);
16339 if (tg3_flag(tp, 5755_PLUS) ||
16340 tg3_asic_rev(tp) == ASIC_REV_5906) {
16341 tg3_flag_set(tp, 1SHOT_MSI);
16342 }

16344 if (tg3_flag(tp, 57765_PLUS)) {
16345 tg3_flag_set(tp, SUPPORT_MSIX);
16346 tp->irq_max = TG3_IRQ_MAX_VECS;
16347 }

16349 tp->txq_max = 1;
16350 tp->rxq_max = 1;
16352 if (tp->irq_max > 1) {
16353 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16354 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16356 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16357 tg3_asic_rev(tp) == ASIC_REV_5720)
16358 tp->txq_max = tp->irq_max - 1;
16359 }

16361 if (tg3_flag(tp, 5755_PLUS) ||
16362 tg3_asic_rev(tp) == ASIC_REV_5906)
16363 tg3_flag_set(tp, SHORT_DMA_BUG);
16365 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16366 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16368 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16369 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16370 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16371 tg3_asic_rev(tp) == ASIC_REV_5762)
16372 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16374 if (tg3_flag(tp, 57765_PLUS) &&
16375 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16376 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16378 if (!tg3_flag(tp, 5705_PLUS) ||
16379 tg3_flag(tp, 5780_CLASS) ||
16380 tg3_flag(tp, USE_JUMBO_BDFLAG))
16381 tg3_flag_set(tp, JUMBO_CAPABLE);
16383 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16384 &pci_state_reg);

16386 if (pci_is_pcie(tp->pdev)) {
16387 u16 lnkctl;

16389 tg3_flag_set(tp, PCI_EXPRESS);

16391 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16392 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16393 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16394 tg3_flag_clear(tp, HW_TSO_2);
16395 tg3_flag_clear(tp, TSO_CAPABLE);
16396 }
16397 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16398 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16399 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16400 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16401 tg3_flag_set(tp, CLKREQ_BUG);
16402 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16403 tg3_flag_set(tp, L1PLLPD_EN);
16404 }
16405 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16406 /* BCM5785 devices are effectively PCIe devices, and should
16407 * follow PCIe codepaths, but do not have a PCIe capabilities
16408 * section.
16409 */
16410 tg3_flag_set(tp, PCI_EXPRESS);
16411 } else if (!tg3_flag(tp, 5705_PLUS) ||
16412 tg3_flag(tp, 5780_CLASS)) {
16413 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16414 if (!tp->pcix_cap) {
16415 dev_err(&tp->pdev->dev,
16416 "Cannot find PCI-X capability, aborting\n");
16420 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16421 tg3_flag_set(tp, PCIX_MODE);
16424 /* If we have an AMD 762 or VIA K8T800 chipset, write
16425 * reordering to the mailbox registers done by the host
16426 * controller can cause major troubles. We read back from
16427 * every mailbox register write to force the writes to be
16428 * posted to the chip in order.
16429 */
16430 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16431 !tg3_flag(tp, PCI_EXPRESS))
16432 tg3_flag_set(tp, MBOX_WRITE_REORDER);
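/* With MBOX_WRITE_REORDER set, mailbox writes are flushed by reading
 * the register back. Illustrative sketch of the pattern used by the
 * flush-style accessor (tg3_write_flush_reg32()):
 *
 *	writel(val, mbox);
 *	readl(mbox);	// read-back forces the posted write to complete
 *
 * so writes reach the chip in program order even behind these bridges.
 */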
16434 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16435 &tp->pci_cacheline_sz);
16436 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16437 &tp->pci_lat_timer);
16438 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16439 tp->pci_lat_timer < 64) {
16440 tp->pci_lat_timer = 64;
16441 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16442 tp->pci_lat_timer);
16443 }

16445 /* Important! -- It is critical that the PCI-X hw workaround
16446 * situation is decided before the first MMIO register access.
16447 */
16448 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16449 /* 5700 BX chips need to have their TX producer index
16450 * mailboxes written twice to work around a bug.
16451 */
16452 tg3_flag_set(tp, TXD_MBOX_HWBUG);

16454 /* If we are in PCI-X mode, enable register write workaround.
16455 *
16456 * The workaround is to use indirect register accesses
16457 * for all chip writes not to mailbox registers.
16458 */
16459 if (tg3_flag(tp, PCIX_MODE)) {
16460 u32 pm_reg;

16462 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16464 /* The chip can have its power management PCI config
16465 * space registers clobbered due to this bug.
16466 * So explicitly force the chip into D0 here.
16467 */
16468 pci_read_config_dword(tp->pdev,
16469 tp->pdev->pm_cap + PCI_PM_CTRL,
16470 &pm_reg);
16471 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16472 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16473 pci_write_config_dword(tp->pdev,
16474 tp->pdev->pm_cap + PCI_PM_CTRL,
16475 pm_reg);

16477 /* Also, force SERR#/PERR# in PCI command. */
16478 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16479 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16480 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16481 }
16482 }

16484 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16485 tg3_flag_set(tp, PCI_HIGH_SPEED);
16486 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16487 tg3_flag_set(tp, PCI_32BIT);
16489 /* Chip-specific fixup from Broadcom driver */
16490 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16491 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16492 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16493 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16494 }

16496 /* Default fast path register access methods */
16497 tp->read32 = tg3_read32;
16498 tp->write32 = tg3_write32;
16499 tp->read32_mbox = tg3_read32;
16500 tp->write32_mbox = tg3_write32;
16501 tp->write32_tx_mbox = tg3_write32;
16502 tp->write32_rx_mbox = tg3_write32;
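/* All fast-path register access goes through these function pointers;
 * the tw32()/tr32() style macros in tg3.h dispatch to them (roughly,
 * tw32(reg, val) -> tp->write32(tp, reg, val)), so the workaround
 * blocks below only have to swap the pointers.
 */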
16504 /* Various workaround register access methods */
16505 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16506 tp->write32 = tg3_write_indirect_reg32;
16507 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16508 (tg3_flag(tp, PCI_EXPRESS) &&
16509 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16510 /*
16511 * Back to back register writes can cause problems on these
16512 * chips; the workaround is to read back all reg writes
16513 * except those to mailbox regs.
16514 *
16515 * See tg3_write_indirect_reg32().
16516 */
16517 tp->write32 = tg3_write_flush_reg32;
16518 }

16520 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16521 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16522 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16523 tp->write32_rx_mbox = tg3_write_flush_reg32;
16524 }

16526 if (tg3_flag(tp, ICH_WORKAROUND)) {
16527 tp->read32 = tg3_read_indirect_reg32;
16528 tp->write32 = tg3_write_indirect_reg32;
16529 tp->read32_mbox = tg3_read_indirect_mbox;
16530 tp->write32_mbox = tg3_write_indirect_mbox;
16531 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16532 tp->write32_rx_mbox = tg3_write_indirect_mbox;

16534 iounmap(tp->regs);
16535 tp->regs = NULL;

16537 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16538 pci_cmd &= ~PCI_COMMAND_MEMORY;
16539 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16540 }

16541 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16542 tp->read32_mbox = tg3_read32_mbox_5906;
16543 tp->write32_mbox = tg3_write32_mbox_5906;
16544 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16545 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16546 }

16548 if (tp->write32 == tg3_write_indirect_reg32 ||
16549 (tg3_flag(tp, PCIX_MODE) &&
16550 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16551 tg3_asic_rev(tp) == ASIC_REV_5701)))
16552 tg3_flag_set(tp, SRAM_USE_CONFIG);
16554 /* The memory arbiter has to be enabled in order for SRAM accesses
16555 * to succeed. Normally on powerup the tg3 chip firmware will make
16556 * sure it is enabled, but other entities such as system netboot
16557 * code might disable it.
16558 */
16559 val = tr32(MEMARB_MODE);
16560 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16562 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16563 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16564 tg3_flag(tp, 5780_CLASS)) {
16565 if (tg3_flag(tp, PCIX_MODE)) {
16566 pci_read_config_dword(tp->pdev,
16567 tp->pcix_cap + PCI_X_STATUS,
16568 &val);
16569 tp->pci_fn = val & 0x7;
16570 }
16571 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16572 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16573 tg3_asic_rev(tp) == ASIC_REV_5720) {
16574 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16575 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16576 val = tr32(TG3_CPMU_STATUS);
16578 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16579 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16580 else
16581 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16582 TG3_CPMU_STATUS_FSHFT_5719;
16583 }

16585 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16586 tp->write32_tx_mbox = tg3_write_flush_reg32;
16587 tp->write32_rx_mbox = tg3_write_flush_reg32;
16588 }

16590 /* Get eeprom hw config before calling tg3_set_power_state().
16591 * In particular, the TG3_FLAG_IS_NIC flag must be
16592 * determined before calling tg3_set_power_state() so that
16593 * we know whether or not to switch out of Vaux power.
16594 * When the flag is set, it means that GPIO1 is used for eeprom
16595 * write protect and also implies that it is a LOM where GPIOs
16596 * are not used to switch power.
16597 */
16598 tg3_get_eeprom_hw_cfg(tp);
16600 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16601 tg3_flag_clear(tp, TSO_CAPABLE);
16602 tg3_flag_clear(tp, TSO_BUG);
16603 tp->fw_needed = NULL;
16604 }

16606 if (tg3_flag(tp, ENABLE_APE)) {
16607 /* Allow reads and writes to the
16608 * APE register and memory space.
16609 */
16610 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16611 PCISTATE_ALLOW_APE_SHMEM_WR |
16612 PCISTATE_ALLOW_APE_PSPACE_WR;
16613 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16614 pci_state_reg);

16616 tg3_ape_lock_init(tp);
16617 tp->ape_hb_interval =
16618 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16619 }

16621 /* Set up tp->grc_local_ctrl before calling
16622 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16623 * will bring 5700's external PHY out of reset.
16624 * It is also used as eeprom write protect on LOMs.
16625 */
16626 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16627 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16628 tg3_flag(tp, EEPROM_WRITE_PROT))
16629 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16630 GRC_LCLCTRL_GPIO_OUTPUT1);
16631 /* Unused GPIO3 must be driven as output on 5752 because there
16632 * are no pull-up resistors on unused GPIO pins.
16633 */
16634 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16635 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16637 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16638 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16639 tg3_flag(tp, 57765_CLASS))
16640 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16642 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16644 /* Turn off the debug UART. */
16645 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16646 if (tg3_flag(tp, IS_NIC))
16647 /* Keep VMain power. */
16648 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16649 GRC_LCLCTRL_GPIO_OUTPUT0;
16650 }

16652 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16653 tp->grc_local_ctrl |=
16654 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16656 /* Switch out of Vaux if it is a NIC */
16657 tg3_pwrsrc_switch_to_vmain(tp);
16659 /* Derive initial jumbo mode from MTU assigned in
16660 * ether_setup() via the alloc_etherdev() call
16661 */
16662 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16663 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16665 /* Determine WakeOnLan speed to use. */
16666 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16667 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16668 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16669 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16670 tg3_flag_clear(tp, WOL_SPEED_100MB);
16671 } else {
16672 tg3_flag_set(tp, WOL_SPEED_100MB);
16673 }

16675 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16676 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16678 /* A few boards don't want Ethernet@WireSpeed phy feature */
16679 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16680 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16681 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16682 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16683 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16684 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16685 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16687 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16688 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16689 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16690 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16691 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16693 if (tg3_flag(tp, 5705_PLUS) &&
16694 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16695 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16696 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16697 !tg3_flag(tp, 57765_PLUS)) {
16698 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16699 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16700 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16701 tg3_asic_rev(tp) == ASIC_REV_5761) {
16702 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16703 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16704 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16705 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16706 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16707 } else
16708 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16709 }

16711 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16712 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16713 tp->phy_otp = tg3_read_otp_phycfg(tp);
16714 if (tp->phy_otp == 0)
16715 tp->phy_otp = TG3_OTP_DEFAULT;
16716 }

16718 if (tg3_flag(tp, CPMU_PRESENT))
16719 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16720 else
16721 tp->mi_mode = MAC_MI_MODE_BASE;
16723 tp->coalesce_mode = 0;
16724 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16725 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16726 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16728 /* Set these bits to enable statistics workaround. */
16729 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16730 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16731 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16732 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16733 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16734 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16735 }

16737 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16738 tg3_asic_rev(tp) == ASIC_REV_57780)
16739 tg3_flag_set(tp, USE_PHYLIB);
16741 err = tg3_mdio_init(tp);
16742 if (err)
16743 return err;

16745 /* Initialize data/descriptor byte/word swapping. */
16746 val = tr32(GRC_MODE);
16747 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16748 tg3_asic_rev(tp) == ASIC_REV_5762)
16749 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16750 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16751 GRC_MODE_B2HRX_ENABLE |
16752 GRC_MODE_HTX2B_ENABLE |
16753 GRC_MODE_HOST_STACKUP);
16754 else
16755 val &= GRC_MODE_HOST_STACKUP;
16757 tw32(GRC_MODE, val | tp->grc_mode);
16759 tg3_switch_clocks(tp);
16761 /* Clear this out for sanity. */
16762 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16764 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16765 tw32(TG3PCI_REG_BASE_ADDR, 0);
16767 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16768 &pci_state_reg);
16769 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16770 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16771 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16772 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16773 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16774 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16775 void __iomem *sram_base;
16777 /* Write some dummy words into the SRAM status block
16778 * area, see if it reads back correctly. If the return
16779 * value is bad, force enable the PCIX workaround.
16780 */
16781 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16783 writel(0x00000000, sram_base);
16784 writel(0x00000000, sram_base + 4);
16785 writel(0xffffffff, sram_base + 4);
16786 if (readl(sram_base) != 0x00000000)
16787 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16788 }
16789 }

16791 udelay(50);
16792 tg3_nvram_init(tp);
16794 /* If the device has an NVRAM, no need to load patch firmware */
16795 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16796 !tg3_flag(tp, NO_NVRAM))
16797 tp->fw_needed = NULL;
16799 grc_misc_cfg = tr32(GRC_MISC_CFG);
16800 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16802 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16803 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16804 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16805 tg3_flag_set(tp, IS_5788);
16807 if (!tg3_flag(tp, IS_5788) &&
16808 tg3_asic_rev(tp) != ASIC_REV_5700)
16809 tg3_flag_set(tp, TAGGED_STATUS);
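/* With tagged status, the status block carries a tag that the driver
 * echoes back to the interrupt mailbox, letting the hardware tell
 * whether the driver has seen the latest status update before it
 * asserts a new interrupt.
 */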
16810 if (tg3_flag(tp, TAGGED_STATUS)) {
16811 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16812 HOSTCC_MODE_CLRTICK_TXBD);
16814 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16815 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16816 tp->misc_host_ctrl);
16817 }

16819 /* Preserve the APE MAC_MODE bits */
16820 if (tg3_flag(tp, ENABLE_APE))
16821 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16822 else
16823 tp->mac_mode = 0;

16825 if (tg3_10_100_only_device(tp, ent))
16826 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16828 err = tg3_phy_probe(tp);
16829 if (err) {
16830 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16831 /* ... but do not return immediately ... */
16832 tg3_mdio_fini(tp);
16833 }
16836 tg3_read_fw_ver(tp);
16838 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16839 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16840 } else {
16841 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16842 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16843 else
16844 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16845 }
16847 /* 5700 {AX,BX} chips have a broken status block link
16848 * change bit implementation, so we must use the
16849 * status register in those cases.
16850 */
16851 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16852 tg3_flag_set(tp, USE_LINKCHG_REG);
16853 else
16854 tg3_flag_clear(tp, USE_LINKCHG_REG);
16856 /* The led_ctrl is set during tg3_phy_probe, here we might
16857 * have to force the link status polling mechanism based
16858 * upon subsystem IDs.
16859 */
16860 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16861 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16862 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16863 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16864 tg3_flag_set(tp, USE_LINKCHG_REG);
16865 }

16867 /* For all SERDES we poll the MAC status register. */
16868 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16869 tg3_flag_set(tp, POLL_SERDES);
16870 else
16871 tg3_flag_clear(tp, POLL_SERDES);
16873 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16874 tg3_flag_set(tp, POLL_CPMU_LINK);
16876 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16877 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16878 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16879 tg3_flag(tp, PCIX_MODE)) {
16880 tp->rx_offset = NET_SKB_PAD;
16881 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16882 tp->rx_copy_thresh = ~(u16)0;
16883 #endif
16884 }

16886 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16887 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16888 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16890 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16892 /* Increment the rx prod index on the rx std ring by at most
16893 * 8 for these chips to work around hw errata.
16894 */
16895 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16896 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16897 tg3_asic_rev(tp) == ASIC_REV_5755)
16898 tp->rx_std_max_post = 8;
16900 if (tg3_flag(tp, ASPM_WORKAROUND))
16901 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16902 PCIE_PWR_MGMT_L1_THRESH_MSK;
16904 return err;
16905 }

16907 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16908 {
16909 u32 hi, lo, mac_offset;
16910 int addr_ok = 0;
16911 int err;

16913 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16914 return 0;

16916 if (tg3_flag(tp, IS_SSB_CORE)) {
16917 err = ssb_gige_get_macaddr(tp->pdev, addr);
16918 if (!err && is_valid_ether_addr(addr))
16919 return 0;
16920 }

16922 mac_offset = 0x7c;
16923 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16924 tg3_flag(tp, 5780_CLASS)) {
16925 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16926 mac_offset = 0xcc;
16927 if (tg3_nvram_lock(tp))
16928 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16929 else
16930 tg3_nvram_unlock(tp);
16931 } else if (tg3_flag(tp, 5717_PLUS)) {
16932 if (tp->pci_fn & 1)
16933 mac_offset = 0xcc;
16934 if (tp->pci_fn > 1)
16935 mac_offset += 0x18c;
16936 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16937 mac_offset = 0x10;

16939 /* First try to get it from MAC address mailbox. */
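/* (The 0x484b value checked below is the ASCII signature "HK" that
 * the bootcode stores in the high mailbox word when a valid MAC
 * address is present.)
 */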
16940 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16941 if ((hi >> 16) == 0x484b) {
16942 addr[0] = (hi >> 8) & 0xff;
16943 addr[1] = (hi >> 0) & 0xff;
16945 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16946 addr[2] = (lo >> 24) & 0xff;
16947 addr[3] = (lo >> 16) & 0xff;
16948 addr[4] = (lo >> 8) & 0xff;
16949 addr[5] = (lo >> 0) & 0xff;
16951 /* Some old bootcode may report a 0 MAC address in SRAM */
16952 addr_ok = is_valid_ether_addr(addr);
16953 }
16954 if (!addr_ok) {
16955 /* Next, try NVRAM. */
16956 if (!tg3_flag(tp, NO_NVRAM) &&
16957 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16958 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16959 memcpy(&addr[0], ((char *)&hi) + 2, 2);
16960 memcpy(&addr[2], (char *)&lo, sizeof(lo));
16961 }
16962 /* Finally just fetch it out of the MAC control regs. */
16963 else {
16964 hi = tr32(MAC_ADDR_0_HIGH);
16965 lo = tr32(MAC_ADDR_0_LOW);
16967 addr[5] = lo & 0xff;
16968 addr[4] = (lo >> 8) & 0xff;
16969 addr[3] = (lo >> 16) & 0xff;
16970 addr[2] = (lo >> 24) & 0xff;
16971 addr[1] = hi & 0xff;
16972 addr[0] = (hi >> 8) & 0xff;
16973 }
16974 }

16976 if (!is_valid_ether_addr(addr))
16977 return -EINVAL;
16978 return 0;
16979 }

16981 #define BOUNDARY_SINGLE_CACHELINE 1
16982 #define BOUNDARY_MULTI_CACHELINE 2
16984 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16985 {
16986 int cacheline_size;
16987 u8 byte;
16988 int goal;

16990 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16991 if (byte == 0)
16992 cacheline_size = 1024;
16993 else
16994 cacheline_size = (int) byte * 4;
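/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the multiply
 * by 4; a value of zero means the BIOS left it unconfigured, so the
 * worst-case 1024 bytes is assumed.
 */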
16996 /* On 5703 and later chips, the boundary bits have no
16997 * effect.
16998 */
16999 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17000 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17001 !tg3_flag(tp, PCI_EXPRESS))
17002 goto out;

17004 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17005 goal = BOUNDARY_MULTI_CACHELINE;
17006 #else
17007 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17008 goal = BOUNDARY_SINGLE_CACHELINE;
17009 #else
17010 goal = 0;
17011 #endif
17012 #endif

17014 if (tg3_flag(tp, 57765_PLUS)) {
17015 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17016 goto out;
17017 }

17019 if (!goal)
17020 goto out;

17022 /* PCI controllers on most RISC systems tend to disconnect
17023 * when a device tries to burst across a cache-line boundary.
17024 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17025 *
17026 * Unfortunately, for PCI-E there are only limited
17027 * write-side controls for this, and thus for reads
17028 * we will still get the disconnects. We'll also waste
17029 * these PCI cycles for both read and write for chips
17030 * other than 5700 and 5701 which do not implement the
17031 * boundary bits.
17032 */
17033 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17034 switch (cacheline_size) {
17035 case 16:
17036 case 32:
17037 case 64:
17038 case 128:
17039 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17040 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17041 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17042 } else {
17043 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17044 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17045 }
17046 break;

17048 case 256:
17049 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17050 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17051 break;

17053 default:
17054 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17055 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17056 break;
17057 }
17058 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17059 switch (cacheline_size) {
17060 case 16:
17061 case 32:
17062 case 64:
17063 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17064 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17065 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17066 break;
17067 }
17068 fallthrough;
17069 case 128:
17070 default:
17071 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17072 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17073 break;
17074 }
17075 } else {
17076 switch (cacheline_size) {
17077 case 16:
17078 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17079 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17080 DMA_RWCTRL_WRITE_BNDRY_16);
17081 break;
17082 }
17083 fallthrough;
17084 case 32:
17085 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17086 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17087 DMA_RWCTRL_WRITE_BNDRY_32);
17088 break;
17089 }
17090 fallthrough;
17091 case 64:
17092 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17093 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17094 DMA_RWCTRL_WRITE_BNDRY_64);
17095 break;
17096 }
17097 fallthrough;
17098 case 128:
17099 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17100 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17101 DMA_RWCTRL_WRITE_BNDRY_128);
17102 break;
17103 }
17104 fallthrough;
17105 case 256:
17106 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17107 DMA_RWCTRL_WRITE_BNDRY_256);
17108 break;
17109 case 512:
17110 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17111 DMA_RWCTRL_WRITE_BNDRY_512);
17112 break;
17113 case 1024:
17114 default:
17115 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17116 DMA_RWCTRL_WRITE_BNDRY_1024);
17117 break;
17118 }
17119 }

17121 out:
17122 return val;
17123 }
17125 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17126 int size, bool to_device)
17127 {
17128 struct tg3_internal_buffer_desc test_desc;
17129 u32 sram_dma_descs;
17130 int i, ret;

17132 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17134 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17135 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17136 tw32(RDMAC_STATUS, 0);
17137 tw32(WDMAC_STATUS, 0);
17139 tw32(BUFMGR_MODE, 0);
17140 tw32(FTQ_RESET, 0);
17142 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17143 test_desc.addr_lo = buf_dma & 0xffffffff;
17144 test_desc.nic_mbuf = 0x00002100;
17145 test_desc.len = size;
17147 /*
17148 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17149 * the *second* time the tg3 driver was getting loaded after an
17150 * unload.
17151 *
17152 * Broadcom tells me:
17153 * ...the DMA engine is connected to the GRC block and a DMA
17154 * reset may affect the GRC block in some unpredictable way...
17155 * The behavior of resets to individual blocks has not been tested.
17156 *
17157 * Broadcom noted the GRC reset will also reset all sub-components.
17158 */
17159 if (to_device) {
17160 test_desc.cqid_sqid = (13 << 8) | 2;
17162 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17163 udelay(40);
17164 } else {
17165 test_desc.cqid_sqid = (16 << 8) | 7;

17167 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17168 udelay(40);
17169 }
17170 test_desc.flags = 0x00000005;
17172 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17173 u32 val;

17175 val = *(((u32 *)&test_desc) + i);
17176 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17177 sram_dma_descs + (i * sizeof(u32)));
17178 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17179 }
17180 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17182 if (to_device)
17183 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17184 else
17185 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17188 for (i = 0; i < 40; i++) {
17189 u32 val;

17191 if (to_device)
17192 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17193 else
17194 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17195 if ((val & 0xffff) == sram_dma_descs) {
17196 ret = 0;
17197 break;
17198 }

17200 udelay(100);
17201 }

17203 return ret;
17204 }
17206 #define TEST_BUFFER_SIZE 0x2000
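/* The DMA engine test below writes this 8 KiB pattern buffer to the
 * chip and reads it back through the host DMA engines; a mismatch on
 * read-back indicates the 5700/5701 write-DMA boundary bug.
 */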
17208 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17209 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17213 static int tg3_test_dma(struct tg3 *tp)
17214 {
17215 dma_addr_t buf_dma;
17216 u32 *buf, saved_dma_rwctrl;
17217 int ret = 0;

17219 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17220 &buf_dma, GFP_KERNEL);
17221 if (!buf) {
17222 ret = -ENOMEM;
17223 goto out_nofree;
17224 }

17226 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17227 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17229 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17231 if (tg3_flag(tp, 57765_PLUS))
17232 goto out;

17234 if (tg3_flag(tp, PCI_EXPRESS)) {
17235 /* DMA read watermark not used on PCIE */
17236 tp->dma_rwctrl |= 0x00180000;
17237 } else if (!tg3_flag(tp, PCIX_MODE)) {
17238 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17239 tg3_asic_rev(tp) == ASIC_REV_5750)
17240 tp->dma_rwctrl |= 0x003f0000;
17242 tp->dma_rwctrl |= 0x003f000f;
17244 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17245 tg3_asic_rev(tp) == ASIC_REV_5704) {
17246 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17247 u32 read_water = 0x7;
17249 /* If the 5704 is behind the EPB bridge, we can
17250 * do the less restrictive ONE_DMA workaround for
17251 * better performance.
17252 */
17253 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17254 tg3_asic_rev(tp) == ASIC_REV_5704)
17255 tp->dma_rwctrl |= 0x8000;
17256 else if (ccval == 0x6 || ccval == 0x7)
17257 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17259 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17260 read_water = 4;
17261 /* Set bit 23 to enable PCIX hw bug fix */
17262 tp->dma_rwctrl |=
17263 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17264 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17265 (1 << 23);
17266 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17267 /* 5780 always in PCIX mode */
17268 tp->dma_rwctrl |= 0x00144000;
17269 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17270 /* 5714 always in PCIX mode */
17271 tp->dma_rwctrl |= 0x00148000;
17272 } else {
17273 tp->dma_rwctrl |= 0x001b000f;
17274 }

17276 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17277 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17279 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17280 tg3_asic_rev(tp) == ASIC_REV_5704)
17281 tp->dma_rwctrl &= 0xfffffff0;
17283 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17284 tg3_asic_rev(tp) == ASIC_REV_5701) {
17285 /* Remove this if it causes problems for some boards. */
17286 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17288 /* On 5700/5701 chips, we need to set this bit.
17289 * Otherwise the chip will issue cacheline transactions
17290 * to streamable DMA memory with not all the byte
17291 * enables turned on. This is an error on several
17292 * RISC PCI controllers, in particular sparc64.
17294 * On 5703/5704 chips, this bit has been reassigned
17295 * a different meaning. In particular, it is used
17296 * on those chips to enable a PCI-X workaround.
17298 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17299 }

17301 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17304 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17305 tg3_asic_rev(tp) != ASIC_REV_5701)
17306 goto out;

17308 /* It is best to perform DMA test with maximum write burst size
17309 * to expose the 5700/5701 write DMA bug.
17310 */
17311 saved_dma_rwctrl = tp->dma_rwctrl;
17312 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17313 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17315 while (1) {
17316 u32 *p = buf, i;

17318 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17319 p[i] = i;

17321 /* Send the buffer to the chip. */
17322 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17323 if (ret) {
17324 dev_err(&tp->pdev->dev,
17325 "%s: Buffer write failed. err = %d\n",
17326 __func__, ret);
17327 break;
17328 }

17330 /* Now read it back. */
17331 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17333 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17334 "err = %d\n", __func__, ret);
17339 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17343 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17344 DMA_RWCTRL_WRITE_BNDRY_16) {
17345 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17346 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17347 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17348 break;
17349 } else {
17350 dev_err(&tp->pdev->dev,
17351 "%s: Buffer corrupted on read back! "
17352 "(%d != %d)\n", __func__, p[i], i);
17353 ret = -ENODEV;
17354 goto out;
17355 }
17356 }

17358 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17359 /* Success. */
17360 ret = 0;
17361 break;
17362 }
17363 }
17364 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17365 DMA_RWCTRL_WRITE_BNDRY_16) {
17366 /* DMA test passed without adjusting DMA boundary,
17367 * now look for chipsets that are known to expose the
17368 * DMA bug without failing the test.
17369 */
17370 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17371 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17372 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17373 } else {
17374 /* Safe to use the calculated DMA boundary. */
17375 tp->dma_rwctrl = saved_dma_rwctrl;
17376 }

17378 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17379 }

17381 out:
17382 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17383 out_nofree:
17384 return ret;
17385 }

17387 static void tg3_init_bufmgr_config(struct tg3 *tp)
17388 {
17389 if (tg3_flag(tp, 57765_PLUS)) {
17390 tp->bufmgr_config.mbuf_read_dma_low_water =
17391 DEFAULT_MB_RDMA_LOW_WATER_5705;
17392 tp->bufmgr_config.mbuf_mac_rx_low_water =
17393 DEFAULT_MB_MACRX_LOW_WATER_57765;
17394 tp->bufmgr_config.mbuf_high_water =
17395 DEFAULT_MB_HIGH_WATER_57765;
17397 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17398 DEFAULT_MB_RDMA_LOW_WATER_5705;
17399 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17400 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17401 tp->bufmgr_config.mbuf_high_water_jumbo =
17402 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17403 } else if (tg3_flag(tp, 5705_PLUS)) {
17404 tp->bufmgr_config.mbuf_read_dma_low_water =
17405 DEFAULT_MB_RDMA_LOW_WATER_5705;
17406 tp->bufmgr_config.mbuf_mac_rx_low_water =
17407 DEFAULT_MB_MACRX_LOW_WATER_5705;
17408 tp->bufmgr_config.mbuf_high_water =
17409 DEFAULT_MB_HIGH_WATER_5705;
17410 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17411 tp->bufmgr_config.mbuf_mac_rx_low_water =
17412 DEFAULT_MB_MACRX_LOW_WATER_5906;
17413 tp->bufmgr_config.mbuf_high_water =
17414 DEFAULT_MB_HIGH_WATER_5906;
17415 }

17417 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17418 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17419 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17420 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17421 tp->bufmgr_config.mbuf_high_water_jumbo =
17422 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17423 } else {
17424 tp->bufmgr_config.mbuf_read_dma_low_water =
17425 DEFAULT_MB_RDMA_LOW_WATER;
17426 tp->bufmgr_config.mbuf_mac_rx_low_water =
17427 DEFAULT_MB_MACRX_LOW_WATER;
17428 tp->bufmgr_config.mbuf_high_water =
17429 DEFAULT_MB_HIGH_WATER;
17431 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17432 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17433 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17434 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17435 tp->bufmgr_config.mbuf_high_water_jumbo =
17436 DEFAULT_MB_HIGH_WATER_JUMBO;
17437 }

17439 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17440 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
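/* These mbuf/DMA watermarks feed the on-chip buffer manager: when free
 * buffer space drops below the low-water marks the chip starts
 * generating flow control, and the high-water marks set where it
 * recovers. The jumbo variants apply when the jumbo rings are enabled.
 */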
17441 }

17443 static char *tg3_phy_string(struct tg3 *tp)
17444 {
17445 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17446 case TG3_PHY_ID_BCM5400: return "5400";
17447 case TG3_PHY_ID_BCM5401: return "5401";
17448 case TG3_PHY_ID_BCM5411: return "5411";
17449 case TG3_PHY_ID_BCM5701: return "5701";
17450 case TG3_PHY_ID_BCM5703: return "5703";
17451 case TG3_PHY_ID_BCM5704: return "5704";
17452 case TG3_PHY_ID_BCM5705: return "5705";
17453 case TG3_PHY_ID_BCM5750: return "5750";
17454 case TG3_PHY_ID_BCM5752: return "5752";
17455 case TG3_PHY_ID_BCM5714: return "5714";
17456 case TG3_PHY_ID_BCM5780: return "5780";
17457 case TG3_PHY_ID_BCM5755: return "5755";
17458 case TG3_PHY_ID_BCM5787: return "5787";
17459 case TG3_PHY_ID_BCM5784: return "5784";
17460 case TG3_PHY_ID_BCM5756: return "5722/5756";
17461 case TG3_PHY_ID_BCM5906: return "5906";
17462 case TG3_PHY_ID_BCM5761: return "5761";
17463 case TG3_PHY_ID_BCM5718C: return "5718C";
17464 case TG3_PHY_ID_BCM5718S: return "5718S";
17465 case TG3_PHY_ID_BCM57765: return "57765";
17466 case TG3_PHY_ID_BCM5719C: return "5719C";
17467 case TG3_PHY_ID_BCM5720C: return "5720C";
17468 case TG3_PHY_ID_BCM5762: return "5762C";
17469 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17470 case 0: return "serdes";
17471 default: return "unknown";
17472 }
17473 }

17475 static char *tg3_bus_string(struct tg3 *tp, char *str)
17476 {
17477 if (tg3_flag(tp, PCI_EXPRESS)) {
17478 strcpy(str, "PCI Express");
17479 return str;
17480 } else if (tg3_flag(tp, PCIX_MODE)) {
17481 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17483 strcpy(str, "PCIX:");
17485 if ((clock_ctrl == 7) ||
17486 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17487 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17488 strcat(str, "133MHz");
17489 else if (clock_ctrl == 0)
17490 strcat(str, "33MHz");
17491 else if (clock_ctrl == 2)
17492 strcat(str, "50MHz");
17493 else if (clock_ctrl == 4)
17494 strcat(str, "66MHz");
17495 else if (clock_ctrl == 6)
17496 strcat(str, "100MHz");
17498 strcpy(str, "PCI:");
17499 if (tg3_flag(tp, PCI_HIGH_SPEED))
17500 strcat(str, "66MHz");
17502 strcat(str, "33MHz");
17504 if (tg3_flag(tp, PCI_32BIT))
17505 strcat(str, ":32-bit");
17507 strcat(str, ":64-bit");
17511 static void tg3_init_coal(struct tg3 *tp)
17513 struct ethtool_coalesce *ec = &tp->coal;
17515 memset(ec, 0, sizeof(*ec));
17516 ec->cmd = ETHTOOL_GCOALESCE;
17517 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17518 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17519 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17520 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17521 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17522 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17523 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17524 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17525 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17527 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17528 HOSTCC_MODE_CLRTICK_TXBD)) {
17529 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17530 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17531 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17532 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17533 }

17535 if (tg3_flag(tp, 5705_PLUS)) {
17536 ec->rx_coalesce_usecs_irq = 0;
17537 ec->tx_coalesce_usecs_irq = 0;
17538 ec->stats_block_coalesce_usecs = 0;
17539 }
17540 }

17542 static int tg3_init_one(struct pci_dev *pdev,
17543 const struct pci_device_id *ent)
17544 {
17545 struct net_device *dev;
17546 struct tg3 *tp;
17547 int i, err;
17548 u32 sndmbx, rcvmbx, intmbx;
17549 char str[40];
17550 u64 dma_mask, persist_dma_mask;
17551 netdev_features_t features = 0;
17552 u8 addr[ETH_ALEN] __aligned(2);
17554 err = pci_enable_device(pdev);
17556 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17560 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17562 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17563 goto err_out_disable_pdev;
17566 pci_set_master(pdev);
17568 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17569 if (!dev) {
17570 err = -ENOMEM;
17571 goto err_out_free_res;
17572 }

17574 SET_NETDEV_DEV(dev, &pdev->dev);
17576 tp = netdev_priv(dev);
17577 tp->pdev = pdev;
17578 tp->dev = dev;
17579 tp->rx_mode = TG3_DEF_RX_MODE;
17580 tp->tx_mode = TG3_DEF_TX_MODE;
17581 tp->irq_sync = 1;
17582 tp->pcierr_recovery = false;

17584 if (tg3_debug > 0)
17585 tp->msg_enable = tg3_debug;
17586 else
17587 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17589 if (pdev_is_ssb_gige_core(pdev)) {
17590 tg3_flag_set(tp, IS_SSB_CORE);
17591 if (ssb_gige_must_flush_posted_writes(pdev))
17592 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17593 if (ssb_gige_one_dma_at_once(pdev))
17594 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17595 if (ssb_gige_have_roboswitch(pdev)) {
17596 tg3_flag_set(tp, USE_PHYLIB);
17597 tg3_flag_set(tp, ROBOSWITCH);
17598 }
17599 if (ssb_gige_is_rgmii(pdev))
17600 tg3_flag_set(tp, RGMII_MODE);
17601 }

17603 /* The word/byte swap controls here control register access byte
17604 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17605 * setting below.
17606 */
17607 tp->misc_host_ctrl =
17608 MISC_HOST_CTRL_MASK_PCI_INT |
17609 MISC_HOST_CTRL_WORD_SWAP |
17610 MISC_HOST_CTRL_INDIR_ACCESS |
17611 MISC_HOST_CTRL_PCISTATE_RW;
17613 /* The NONFRM (non-frame) byte/word swap controls take effect
17614 * on descriptor entries, anything which isn't packet data.
17615 *
17616 * The StrongARM chips on the board (one for tx, one for rx)
17617 * are running in big-endian mode.
17618 */
17619 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17620 GRC_MODE_WSWAP_NONFRM_DATA);
17621 #ifdef __BIG_ENDIAN
17622 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17623 #endif

17624 spin_lock_init(&tp->lock);
17625 spin_lock_init(&tp->indirect_lock);
17626 INIT_WORK(&tp->reset_task, tg3_reset_task);
17628 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17630 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17632 goto err_out_free_dev;
17635 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17636 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17637 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17638 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17639 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17640 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17641 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17642 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17650 tg3_flag_set(tp, ENABLE_APE);
17651 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17652 if (!tp->aperegs) {
17653 dev_err(&pdev->dev,
17654 "Cannot map APE registers, aborting\n");
17655 err = -ENOMEM;
17656 goto err_out_iounmap;
17657 }
17658 }

17660 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17661 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17663 dev->ethtool_ops = &tg3_ethtool_ops;
17664 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17665 dev->netdev_ops = &tg3_netdev_ops;
17666 dev->irq = pdev->irq;
17668 err = tg3_get_invariants(tp, ent);
17669 if (err) {
17670 dev_err(&pdev->dev,
17671 "Problem fetching invariants of chip, aborting\n");
17672 goto err_out_apeunmap;
17673 }
17675 /* The EPB bridge inside 5714, 5715, and 5780 and any
17676 * device behind the EPB cannot support DMA addresses > 40-bit.
17677 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17678 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17679 * do DMA address check in tg3_start_xmit().
17680 */
17681 if (tg3_flag(tp, IS_5788))
17682 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17683 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17684 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17685 #ifdef CONFIG_HIGHMEM
17686 dma_mask = DMA_BIT_MASK(64);
17687 #endif
17688 } else
17689 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
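/* dma_mask covers streaming (per-packet) mappings, which
 * tg3_start_xmit() can bounce if a buffer lands above 40 bits;
 * persist_dma_mask covers coherent allocations (rings, status block),
 * which cannot be bounced and so stay at the stricter limit.
 */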
17691 /* Configure DMA attributes. */
17692 if (dma_mask > DMA_BIT_MASK(32)) {
17693 err = dma_set_mask(&pdev->dev, dma_mask);
17694 if (!err) {
17695 features |= NETIF_F_HIGHDMA;
17696 err = dma_set_coherent_mask(&pdev->dev,
17697 persist_dma_mask);
17698 if (err < 0) {
17699 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17700 "DMA for consistent allocations\n");
17701 goto err_out_apeunmap;
17702 }
17703 }
17704 }
17705 if (err || dma_mask == DMA_BIT_MASK(32)) {
17706 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17707 if (err) {
17708 dev_err(&pdev->dev,
17709 "No usable DMA configuration, aborting\n");
17710 goto err_out_apeunmap;
17711 }
17712 }

17714 tg3_init_bufmgr_config(tp);
17716 /* 5700 B0 chips do not support checksumming correctly due
17717 * to hardware bugs.
17719 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17720 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17722 if (tg3_flag(tp, 5755_PLUS))
17723 features |= NETIF_F_IPV6_CSUM;
17724 }

17726 /* TSO is on by default on chips that support hardware TSO.
17727 * Firmware TSO on older chips gives lower performance, so it
17728 * is off by default, but can be enabled using ethtool.
17730 if ((tg3_flag(tp, HW_TSO_1) ||
17731 tg3_flag(tp, HW_TSO_2) ||
17732 tg3_flag(tp, HW_TSO_3)) &&
17733 (features & NETIF_F_IP_CSUM))
17734 features |= NETIF_F_TSO;
17735 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17736 if (features & NETIF_F_IPV6_CSUM)
17737 features |= NETIF_F_TSO6;
17738 if (tg3_flag(tp, HW_TSO_3) ||
17739 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17740 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17741 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17742 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17743 tg3_asic_rev(tp) == ASIC_REV_57780)
17744 features |= NETIF_F_TSO_ECN;
17745 }

17747 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17748 NETIF_F_HW_VLAN_CTAG_RX;
17749 dev->vlan_features |= features;
17751 /*
17752 * Add loopback capability only for a subset of devices that support
17753 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17754 * loopback for the remaining devices.
17755 */
17756 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17757 !tg3_flag(tp, CPMU_PRESENT))
17758 /* Add the loopback capability */
17759 features |= NETIF_F_LOOPBACK;
17761 dev->hw_features |= features;
17762 dev->priv_flags |= IFF_UNICAST_FLT;
17764 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17765 dev->min_mtu = TG3_MIN_MTU;
17766 dev->max_mtu = TG3_MAX_MTU(tp);
17768 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17769 !tg3_flag(tp, TSO_CAPABLE) &&
17770 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17771 tg3_flag_set(tp, MAX_RXPEND_64);
17772 tp->rx_pending = 63;
17773 }

17775 err = tg3_get_device_address(tp, addr);
17776 if (err) {
17777 dev_err(&pdev->dev,
17778 "Could not obtain valid ethernet address, aborting\n");
17779 goto err_out_apeunmap;
17780 }
17781 eth_hw_addr_set(dev, addr);
17783 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17784 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17785 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17786 for (i = 0; i < tp->irq_max; i++) {
17787 struct tg3_napi *tnapi = &tp->napi[i];

17789 tnapi->tp = tp;
17790 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

17792 tnapi->int_mbox = intmbx;
17793 if (i <= 4)
17794 intmbx += 0x8;
17795 else
17796 intmbx += 0x4;

17798 tnapi->consmbox = rcvmbx;
17799 tnapi->prodmbox = sndmbx;

17801 if (i)
17802 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17803 else
17804 tnapi->coal_now = HOSTCC_MODE_NOW;

17806 if (!tg3_flag(tp, SUPPORT_MSIX))
17807 break;

17809 /*
17810 * If we support MSIX, we'll be using RSS. If we're using
17811 * RSS, the first vector only handles link interrupts and the
17812 * remaining vectors handle rx and tx interrupts. Reuse the
17813 * mailbox values for the next iteration. The values we setup
17814 * above are still useful for the single vectored mode.
17815 */
17816 if (!i)
17817 continue;

17819 rcvmbx += 0x8;

17821 if (sndmbx & 0x4)
17822 sndmbx -= 0x4;
17823 else
17824 sndmbx += 0xc;
17825 }

17827 /*
17828 * Reset the chip in case the UNDI or EFI driver did not shut it
17829 * down cleanly. The DMA self test will enable WDMAC and we'll see
17830 * (spurious) pending DMA on the PCI bus at that point.
17831 */
17832 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17833 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17834 tg3_full_lock(tp, 0);
17835 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17836 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17837 tg3_full_unlock(tp);
17838 }

17840 err = tg3_test_dma(tp);
17841 if (err) {
17842 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17843 goto err_out_apeunmap;
17844 }

17846 tg3_init_coal(tp);

17848 pci_set_drvdata(pdev, dev);
17850 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17851 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17852 tg3_asic_rev(tp) == ASIC_REV_5762)
17853 tg3_flag_set(tp, PTP_CAPABLE);
17855 tg3_timer_init(tp);
17857 tg3_carrier_off(tp);
17859 err = register_netdev(dev);
17861 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17862 goto err_out_apeunmap;
17865 if (tg3_flag(tp, PTP_CAPABLE)) {
17866 tg3_ptp_init(tp);
17867 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17868 &tp->pdev->dev);
17869 if (IS_ERR(tp->ptp_clock))
17870 tp->ptp_clock = NULL;
17871 }

17873 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17874 tp->board_part_number,
17875 tg3_chip_rev_id(tp),
17876 tg3_bus_string(tp, str),
17877 dev->dev_addr);

17879 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17880 char *ethtype;

17882 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17883 ethtype = "10/100Base-TX";
17884 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17885 ethtype = "1000Base-SX";
17887 ethtype = "10/100/1000Base-T";
17889 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17890 "(WireSpeed[%d], EEE[%d])\n",
17891 tg3_phy_string(tp), ethtype,
17892 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17893 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17896 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17897 (dev->features & NETIF_F_RXCSUM) != 0,
17898 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17899 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17900 tg3_flag(tp, ENABLE_ASF) != 0,
17901 tg3_flag(tp, TSO_CAPABLE) != 0);
17902 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17903 tp->dma_rwctrl,
17904 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17905 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17907 pci_save_state(pdev);
17909 return 0;

17911 err_out_apeunmap:
17912 if (tp->aperegs) {
17913 iounmap(tp->aperegs);
17914 tp->aperegs = NULL;
17915 }

17917 err_out_iounmap:
17918 if (tp->regs) {
17919 iounmap(tp->regs);
17920 tp->regs = NULL;
17921 }

17923 err_out_free_dev:
17924 free_netdev(dev);

17926 err_out_free_res:
17927 pci_release_regions(pdev);

17929 err_out_disable_pdev:
17930 if (pci_is_enabled(pdev))
17931 pci_disable_device(pdev);
17932 return err;
17933 }

17935 static void tg3_remove_one(struct pci_dev *pdev)
17937 struct net_device *dev = pci_get_drvdata(pdev);
17939 if (dev) {
17940 struct tg3 *tp = netdev_priv(dev);

17942 tg3_ptp_fini(tp);

17944 release_firmware(tp->fw);

17946 tg3_reset_task_cancel(tp);

17948 if (tg3_flag(tp, USE_PHYLIB)) {
17949 tg3_phy_fini(tp);
17950 tg3_mdio_fini(tp);
17951 }

17953 unregister_netdev(dev);
17954 if (tp->aperegs) {
17955 iounmap(tp->aperegs);
17956 tp->aperegs = NULL;
17957 }
17958 if (tp->regs) {
17959 iounmap(tp->regs);
17960 tp->regs = NULL;
17961 }
17962 free_netdev(dev);
17963 pci_release_regions(pdev);
17964 pci_disable_device(pdev);
17965 }
17966 }
17968 #ifdef CONFIG_PM_SLEEP
17969 static int tg3_suspend(struct device *device)
17970 {
17971 struct net_device *dev = dev_get_drvdata(device);
17972 struct tg3 *tp = netdev_priv(dev);
17973 int err = 0;

17975 rtnl_lock();

17977 if (!netif_running(dev))
17978 goto unlock;

17980 tg3_reset_task_cancel(tp);
17981 tg3_phy_stop(tp);
17982 tg3_netif_stop(tp);
17984 tg3_timer_stop(tp);
17986 tg3_full_lock(tp, 1);
17987 tg3_disable_ints(tp);
17988 tg3_full_unlock(tp);
17990 netif_device_detach(dev);
17992 tg3_full_lock(tp, 0);
17993 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17994 tg3_flag_clear(tp, INIT_COMPLETE);
17995 tg3_full_unlock(tp);
17997 err = tg3_power_down_prepare(tp);
17998 if (err) {
17999 int err2;

18001 tg3_full_lock(tp, 0);

18003 tg3_flag_set(tp, INIT_COMPLETE);
18004 err2 = tg3_restart_hw(tp, true);
18005 if (err2)
18006 goto out;

18008 tg3_timer_start(tp);
18010 netif_device_attach(dev);
18011 tg3_netif_start(tp);
18013 out:
18014 tg3_full_unlock(tp);

18016 if (!err2)
18017 tg3_phy_start(tp);
18018 }

18020 unlock:
18021 rtnl_unlock();
18022 return err;
18023 }
18025 static int tg3_resume(struct device *device)
18026 {
18027 struct net_device *dev = dev_get_drvdata(device);
18028 struct tg3 *tp = netdev_priv(dev);
18029 int err = 0;

18031 rtnl_lock();

18033 if (!netif_running(dev))
18034 goto unlock;
18036 netif_device_attach(dev);
18038 tg3_full_lock(tp, 0);
18040 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18042 tg3_flag_set(tp, INIT_COMPLETE);
18043 err = tg3_restart_hw(tp,
18044 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18045 if (err)
18046 goto out;

18048 tg3_timer_start(tp);
18050 tg3_netif_start(tp);
18052 out:
18053 tg3_full_unlock(tp);

18055 if (!err)
18056 tg3_phy_start(tp);

18058 unlock:
18059 rtnl_unlock();
18060 return err;
18061 }
18062 #endif /* CONFIG_PM_SLEEP */
18064 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
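/* SIMPLE_DEV_PM_OPS only wires up the handlers when CONFIG_PM_SLEEP is
 * enabled, which is why tg3_suspend/tg3_resume are guarded by the
 * #ifdef above while tg3_pm_ops itself is always defined.
 */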
18066 static void tg3_shutdown(struct pci_dev *pdev)
18067 {
18068 struct net_device *dev = pci_get_drvdata(pdev);
18069 struct tg3 *tp = netdev_priv(dev);
18071 tg3_reset_task_cancel(tp);

18073 rtnl_lock();

18075 netif_device_detach(dev);

18077 if (netif_running(dev))
18078 dev_close(dev);
18079 if (system_state == SYSTEM_POWER_OFF)
18080 tg3_power_down(tp);

18082 rtnl_unlock();

18084 pci_disable_device(pdev);
18085 }

18087 /**
18088 * tg3_io_error_detected - called when PCI error is detected
18089 * @pdev: Pointer to PCI device
18090 * @state: The current pci connection state
18091 *
18092 * This function is called after a PCI bus error affecting
18093 * this device has been detected.
18094 */
18095 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18096 pci_channel_state_t state)
18097 {
18098 struct net_device *netdev = pci_get_drvdata(pdev);
18099 struct tg3 *tp = netdev_priv(netdev);
18100 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18102 netdev_info(netdev, "PCI I/O error detected\n");
18104 /* Want to make sure that the reset task doesn't run */
18105 tg3_reset_task_cancel(tp);

18107 rtnl_lock();

18109 /* Could be second call or maybe we don't have netdev yet */
18110 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18111 goto done;
18113 /* We needn't recover from permanent error */
18114 if (state == pci_channel_io_frozen)
18115 tp->pcierr_recovery = true;
18117 tg3_phy_stop(tp);

18119 tg3_netif_stop(tp);
18121 tg3_timer_stop(tp);
18123 netif_device_detach(netdev);
18125 /* Clean up software state, even if MMIO is blocked */
18126 tg3_full_lock(tp, 0);
18127 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18128 tg3_full_unlock(tp);
18130 done:
18131 if (state == pci_channel_io_perm_failure) {
18132 if (netdev) {
18133 tg3_napi_enable(tp);
18134 dev_close(netdev);
18135 }
18136 err = PCI_ERS_RESULT_DISCONNECT;
18137 } else {
18138 pci_disable_device(pdev);
18139 }

18141 rtnl_unlock();

18143 return err;
18144 }

18146 /**
18147 * tg3_io_slot_reset - called after the pci bus has been reset.
18148 * @pdev: Pointer to PCI device
18149 *
18150 * Restart the card from scratch, as if from a cold-boot.
18151 * At this point, the card has experienced a hard reset,
18152 * followed by fixups by BIOS, and has its config space
18153 * set up identically to what it was at cold boot.
18154 */
18155 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18156 {
18157 struct net_device *netdev = pci_get_drvdata(pdev);
18158 struct tg3 *tp = netdev_priv(netdev);
18159 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18160 int err;

18162 rtnl_lock();

18164 if (pci_enable_device(pdev)) {
18165 dev_err(&pdev->dev,
18166 "Cannot re-enable PCI device after reset.\n");
18167 goto done;
18168 }

18170 pci_set_master(pdev);
18171 pci_restore_state(pdev);
18172 pci_save_state(pdev);
18174 if (!netdev || !netif_running(netdev)) {
18175 rc = PCI_ERS_RESULT_RECOVERED;
18176 goto done;
18177 }

18179 err = tg3_power_up(tp);
18180 if (err)
18181 goto done;

18183 rc = PCI_ERS_RESULT_RECOVERED;
18185 done:
18186 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18187 tg3_napi_enable(tp);
18188 dev_close(netdev);
18189 }

18191 rtnl_unlock();

18193 return rc;
18194 }
18195 /**
18196 * tg3_io_resume - called when traffic can start flowing again.
18197 * @pdev: Pointer to PCI device
18198 *
18199 * This callback is called when the error recovery driver tells
18200 * us that it's OK to resume normal operation.
18201 */
18202 static void tg3_io_resume(struct pci_dev *pdev)
18203 {
18204 struct net_device *netdev = pci_get_drvdata(pdev);
18205 struct tg3 *tp = netdev_priv(netdev);
18206 int err;

18208 rtnl_lock();

18210 if (!netdev || !netif_running(netdev))
18211 goto done;
18213 tg3_full_lock(tp, 0);
18214 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18215 tg3_flag_set(tp, INIT_COMPLETE);
18216 err = tg3_restart_hw(tp, true);
18217 if (err) {
18218 tg3_full_unlock(tp);
18219 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18220 goto done;
18221 }
18223 netif_device_attach(netdev);
18225 tg3_timer_start(tp);
18227 tg3_netif_start(tp);
18229 tg3_full_unlock(tp);
18231 tg3_phy_start(tp);

18233 done:
18234 tp->pcierr_recovery = false;
18235 rtnl_unlock();
18236 }
18238 static const struct pci_error_handlers tg3_err_handler = {
18239 .error_detected = tg3_io_error_detected,
18240 .slot_reset = tg3_io_slot_reset,
18241 .resume = tg3_io_resume
18242 };

18244 static struct pci_driver tg3_driver = {
18245 .name = DRV_MODULE_NAME,
18246 .id_table = tg3_pci_tbl,
18247 .probe = tg3_init_one,
18248 .remove = tg3_remove_one,
18249 .err_handler = &tg3_err_handler,
18250 .driver.pm = &tg3_pm_ops,
18251 .shutdown = tg3_shutdown,
18252 };

18254 module_pci_driver(tg3_driver);