/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
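/* Illustrative usage (not part of the original source): a call such as
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, ENABLE_ASF);
 *
 * expands to atomic test_bit()/set_bit() calls on the (tp)->tg3_flags
 * bitmap, with the token pasted into TG3_FLAG_ENABLE_APE etc., so every
 * flag access is type-checked against enum TG3_FLAGS.
 */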
#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
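/* Worked example of the shift/mask point above: with TG3_TX_RING_SIZE
 * fixed at a power of two (512), NEXT_TX(511) == ((511 + 1) & 511) == 0,
 * so the producer index wraps with a single AND instead of a '% 512'
 * hardware modulo.
 */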
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
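/* Resulting behavior, sketched (illustrative, not from the original
 * source): on architectures with cheap unaligned access,
 * TG3_RX_COPY_THRESH(tp) folds to the constant 256, so short frames
 * are copied into a fresh skb while larger ones are handed up in
 * place; on strict-alignment architectures the threshold comes from
 * tp->rx_copy_thresh so the 5701 workaround can raise it at runtime.
 */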
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
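/* These bits travel in pci_device_id.driver_data through the table
 * below and are consumed at probe time; a minimal sketch of that
 * consumer (assuming 'ent' is the matched table entry) would be:
 *
 *	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY)
 *		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
 */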
static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
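/* Usage sketch for the accessor macros (illustrative): tw32() posts a
 * register write through the tp->write32 method chosen at probe time,
 * tw32_f() additionally reads the register back to flush the posted
 * write, and tw32_wait_f() inserts a udelay() around the flush for
 * registers that are unsafe to read back immediately, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */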
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
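/* A minimal sketch of how the window above is used: both helpers
 * serialize on tp->indirect_lock, point TG3PCI_MEM_WIN_BASE_ADDR at
 * the SRAM offset, move one dword through TG3PCI_MEM_WIN_DATA, then
 * park the window back at zero.  For example, tg3_poll_fw() below
 * reads the firmware handshake mailbox this way:
 *
 *	u32 val;
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 */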
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
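/* Lock register layout, as the code above implies: each lock number
 * owns one dword in the REQ and GRANT blocks (offset 4 * locknum).
 * The driver sets APE_LOCK_REQ_DRIVER (or a per-function bit for
 * GRC/MEM/GPIO on multi-function parts) in the REQ dword, then polls
 * the matching GRANT dword; seeing exactly its own bit there means
 * the APE firmware granted the lock, and anything else after ~1 ms
 * results in -EBUSY with the request revoked.
 */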
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
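/* MI_COM frame layout, worked example (illustrative values): reading
 * MII_BMSR (register 1) from PHY address 1 builds
 *
 *	frame_val = ((1 << MI_COM_PHY_ADDR_SHIFT) & MI_COM_PHY_ADDR_MASK) |
 *		    ((1 << MI_COM_REG_ADDR_SHIFT) & MI_COM_REG_ADDR_MASK) |
 *		    MI_COM_CMD_READ | MI_COM_START;
 *
 * and the result is taken from the low 16 bits (MI_COM_DATA_MASK)
 * once MI_COM_BUSY clears, within PHY_BUSY_LOOPS polls.
 */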
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
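/* Shadow-register convention, sketched: MII_TG3_AUX_CTRL multiplexes
 * several internal registers.  A read selects the target via the
 * RDSEL field and then reads the same MII register back; a write ORs
 * the shadow selector into the value (plus the WREN bit for the MISC
 * shadow).  E.g. reading the MISC shadow:
 *
 *	u32 val;
 *	tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
 */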
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}

		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
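/* Worked numbers for the clamp above: a full TG3_FW_EVENT_TIMEOUT_USEC
 * of 2500 us yields delay_cnt = (2500 >> 3) + 1 = 313 polls of 8 us,
 * i.e. the loop still waits roughly the full 2.5 ms but re-checks the
 * event bit every 8 us so it can bail out early.
 */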
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
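/* Resolution table implied by the function above (local advert vs.
 * link partner advert):
 *
 *	lcl PAUSE	rmt PAUSE	-> TX+RX pause
 *	lcl PAUSE+ASYM	rmt ASYM only	-> RX pause only
 *	lcl ASYM only	rmt PAUSE+ASYM	-> TX pause only
 *	otherwise			-> no pause
 *
 * which matches the standard 802.3x pause resolution.
 */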
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		fallthrough;
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		ethtool_convert_legacy_u32_to_link_mode(
			phydev->advertising, tp->link_config.advertising);
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
2303 static void tg3_phy_apply_otp(struct tg3 *tp)
2312 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2316 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2317 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2320 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2321 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2324 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2328 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2331 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2334 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337 tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2343 struct ethtool_eee *dest = &tp->eee;
2345 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2351 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354 /* Pull eee_active */
2355 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2356 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2357 dest->eee_active = 1;
2359 dest->eee_active = 0;
2361 /* Pull lp advertised settings */
2362 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2366 /* Pull advertised and eee_enabled settings */
2367 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369 dest->eee_enabled = !!val;
2370 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2372 /* Pull tx_lpi_enabled */
2373 val = tr32(TG3_CPMU_EEE_MODE);
2374 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376 /* Pull lpi timer value */
2377 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2384 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2389 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391 tp->link_config.active_duplex == DUPLEX_FULL &&
2392 (tp->link_config.active_speed == SPEED_100 ||
2393 tp->link_config.active_speed == SPEED_1000)) {
2396 if (tp->link_config.active_speed == SPEED_1000)
2397 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403 tg3_eee_pull_config(tp, NULL);
2404 if (tp->eee.eee_active)
2408 if (!tp->setlpicnt) {
2409 if (current_link_up &&
2410 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2411 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2412 tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 val = tr32(TG3_CPMU_EEE_MODE);
2416 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2420 static void tg3_phy_eee_enable(struct tg3 *tp)
2424 if (tp->link_config.active_speed == SPEED_1000 &&
2425 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2426 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2427 tg3_flag(tp, 57765_CLASS)) &&
2428 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429 val = MII_TG3_DSP_TAP26_ALNOKO |
2430 MII_TG3_DSP_TAP26_RMRXSTO;
2431 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2432 tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 val = tr32(TG3_CPMU_EEE_MODE);
2436 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 static int tg3_wait_macro_done(struct tg3 *tp)
2446 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2447 if ((tmp32 & 0x1000) == 0)
2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 static const u32 test_pat[4][6] = {
2460 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2461 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2462 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2463 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
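/* Four channels of six test words each. The loop below writes each
 * pattern into the PHY DSP through MII_TG3_DSP_RW_PORT, reads it back
 * as (low, high) pairs, and bails out with -EBUSY (flagging another
 * PHY reset via *resetp on macro-done timeouts) on any mismatch.
 */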
2467 for (chan = 0; chan < 4; chan++) {
2470 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2471 (chan * 0x2000) | 0x0200);
2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474 for (i = 0; i < 6; i++)
2475 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2479 if (tg3_wait_macro_done(tp)) {
2484 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485 (chan * 0x2000) | 0x0200);
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2487 if (tg3_wait_macro_done(tp)) {
2492 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2493 if (tg3_wait_macro_done(tp)) {
2498 for (i = 0; i < 6; i += 2) {
2501 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2502 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2503 tg3_wait_macro_done(tp)) {
2509 if (low != test_pat[chan][i] ||
2510 high != test_pat[chan][i+1]) {
2511 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2512 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2523 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2527 for (chan = 0; chan < 4; chan++) {
2530 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2531 (chan * 0x2000) | 0x0200);
2532 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2533 for (i = 0; i < 6; i++)
2534 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2535 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2536 if (tg3_wait_macro_done(tp))
2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 u32 reg32, phy9_orig;
2546 int retries, do_phy_reset, err;
2552 err = tg3_bmcr_reset(tp);
2558 /* Disable transmitter and interrupt. */
2559 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2563 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565 /* Set full-duplex, 1000 mbps. */
2566 tg3_writephy(tp, MII_BMCR,
2567 BMCR_FULLDPLX | BMCR_SPEED1000);
2569 /* Set to master mode. */
2570 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 tg3_writephy(tp, MII_CTRL1000,
2574 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2580 /* Block the PHY control access. */
2581 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 } while (--retries);
2588 err = tg3_phy_reset_chanpat(tp);
2592 tg3_phydsp_write(tp, 0x8005, 0x0000);
2594 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2595 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597 tg3_phy_toggle_auxctl_smdsp(tp, false);
2599 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2606 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2611 static void tg3_carrier_off(struct tg3 *tp)
2613 netif_carrier_off(tp->dev);
2614 tp->link_up = false;
2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 if (tg3_flag(tp, ENABLE_ASF))
2620 netdev_warn(tp->dev,
2621 "Management side-band traffic will be interrupted during phy settings change\n");
2624 /* This will unconditionally reset the tigon3 PHY and reapply all
2625 * chip-specific workarounds, reporting link loss if the link was up.
2627 static int tg3_phy_reset(struct tg3 *tp)
2632 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2633 val = tr32(GRC_MISC_CFG);
2634 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 err = tg3_readphy(tp, MII_BMSR, &val);
2638 err |= tg3_readphy(tp, MII_BMSR, &val);
2642 if (netif_running(tp->dev) && tp->link_up) {
2643 netif_carrier_off(tp->dev);
2644 tg3_link_report(tp);
2647 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2648 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5705) {
2650 err = tg3_phy_reset_5703_4_5(tp);
2657 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2658 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2659 cpmuctrl = tr32(TG3_CPMU_CTRL);
2660 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2661 tw32(TG3_CPMU_CTRL,
2662 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 err = tg3_bmcr_reset(tp);
2669 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2670 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2671 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673 tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2677 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2678 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2679 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2680 CPMU_LSPD_1000MB_MACCLK_12_5) {
2681 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2687 if (tg3_flag(tp, 5717_PLUS) &&
2688 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 tg3_phy_apply_otp(tp);
2693 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2694 tg3_phy_toggle_apd(tp, true);
2696 tg3_phy_toggle_apd(tp, false);
2699 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2700 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2702 tg3_phydsp_write(tp, 0x000a, 0x0323);
2703 tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2707 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2712 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 tg3_phydsp_write(tp, 0x000a, 0x310b);
2714 tg3_phydsp_write(tp, 0x201f, 0x9506);
2715 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2716 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2719 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2721 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2722 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2723 tg3_writephy(tp, MII_TG3_TEST1,
2724 MII_TG3_TEST1_TRIM_EN | 0x4);
2726 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728 tg3_phy_toggle_auxctl_smdsp(tp, false);
2732 /* Set Extended packet length bit (bit 14) on all chips that */
2733 /* support jumbo frames */
2734 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2735 /* Cannot do read-modify-write on 5401 */
2736 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2737 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738 /* Set bit 14 with read-modify-write to preserve other bits */
2739 err = tg3_phy_auxctl_read(tp,
2740 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2743 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2747 * jumbo frames transmission.
2749 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2751 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2752 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2756 /* adjust output voltage */
2757 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2761 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763 tg3_phy_toggle_automdix(tp, true);
2764 tg3_phy_set_wirespeed(tp);
2768 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2769 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2770 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2771 TG3_GPIO_MSG_NEED_VAUX)
2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2773 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2774 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 12))
2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2779 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2780 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 12))
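/* Each PCI function owns one 4-bit nibble of this message word, so a
 * function's state is shifted by 4 * pci_fn. For example, function 2
 * reporting "driver present and needs Vaux" contributes
 * (TG3_GPIO_MSG_DRVR_PRES | TG3_GPIO_MSG_NEED_VAUX) << 8 == 0x300.
 */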
2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2788 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2789 tg3_asic_rev(tp) == ASIC_REV_5719)
2790 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792 status = tr32(TG3_CPMU_DRV_STATUS);
2794 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2795 status &= ~(TG3_GPIO_MSG_MASK << shift);
2796 status |= (newstat << shift);
2798 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2799 tg3_asic_rev(tp) == ASIC_REV_5719)
2800 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802 tw32(TG3_CPMU_DRV_STATUS, status);
2804 return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 if (!tg3_flag(tp, IS_NIC))
2812 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5720) {
2815 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2821 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826 TG3_GRC_LCLCTL_PWRSW_DELAY);
2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2836 if (!tg3_flag(tp, IS_NIC) ||
2837 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2838 tg3_asic_rev(tp) == ASIC_REV_5701)
2841 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843 tw32_wait_f(GRC_LOCAL_CTRL,
2844 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2845 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847 tw32_wait_f(GRC_LOCAL_CTRL,
2848 grc_local_ctrl,
2849 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851 tw32_wait_f(GRC_LOCAL_CTRL,
2852 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853 TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 if (!tg3_flag(tp, IS_NIC))
2861 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2862 tg3_asic_rev(tp) == ASIC_REV_5701) {
2863 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2864 (GRC_LCLCTRL_GPIO_OE0 |
2865 GRC_LCLCTRL_GPIO_OE1 |
2866 GRC_LCLCTRL_GPIO_OE2 |
2867 GRC_LCLCTRL_GPIO_OUTPUT0 |
2868 GRC_LCLCTRL_GPIO_OUTPUT1),
2869 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2871 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2872 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2873 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2874 GRC_LCLCTRL_GPIO_OE1 |
2875 GRC_LCLCTRL_GPIO_OE2 |
2876 GRC_LCLCTRL_GPIO_OUTPUT0 |
2877 GRC_LCLCTRL_GPIO_OUTPUT1 |
2878 tp->grc_local_ctrl;
2879 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2880 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2883 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2884 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2887 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 u32 grc_local_ctrl = 0;
2893 /* Workaround to prevent overdrawing Amps. */
2894 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2895 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2896 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2897 grc_local_ctrl,
2898 TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 /* On 5753 and variants, GPIO2 cannot be used. */
2902 no_gpio2 = tp->nic_sram_data_cfg &
2903 NIC_SRAM_DATA_CFG_NO_GPIO2;
2905 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2906 GRC_LCLCTRL_GPIO_OE1 |
2907 GRC_LCLCTRL_GPIO_OE2 |
2908 GRC_LCLCTRL_GPIO_OUTPUT1 |
2909 GRC_LCLCTRL_GPIO_OUTPUT2;
2911 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2912 GRC_LCLCTRL_GPIO_OUTPUT2);
2914 tw32_wait_f(GRC_LOCAL_CTRL,
2915 tp->grc_local_ctrl | grc_local_ctrl,
2916 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920 tw32_wait_f(GRC_LOCAL_CTRL,
2921 tp->grc_local_ctrl | grc_local_ctrl,
2922 TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2926 tw32_wait_f(GRC_LOCAL_CTRL,
2927 tp->grc_local_ctrl | grc_local_ctrl,
2928 TG3_GRC_LCLCTL_PWRSW_DELAY);
2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2937 /* Serialize power state transitions */
2938 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2942 msg = TG3_GPIO_MSG_NEED_VAUX;
2944 msg = tg3_set_function_status(tp, msg);
2946 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2950 tg3_pwrsrc_switch_to_vaux(tp);
2952 tg3_pwrsrc_die_with_vmain(tp);
2955 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 bool need_vaux = false;
2962 /* The GPIOs do something completely different on 57765. */
2963 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2967 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5720) {
2969 tg3_frob_aux_power_5717(tp, include_wol ?
2970 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2974 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2975 struct net_device *dev_peer;
2977 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979 /* remove_one() may have been run on the peer. */
2981 struct tg3 *tp_peer = netdev_priv(dev_peer);
2983 if (tg3_flag(tp_peer, INIT_COMPLETE))
2986 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2987 tg3_flag(tp_peer, ENABLE_ASF))
2992 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2993 tg3_flag(tp, ENABLE_ASF))
2997 tg3_pwrsrc_switch_to_vaux(tp);
2999 tg3_pwrsrc_die_with_vmain(tp);
3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3007 if (speed != SPEED_10)
3009 } else if (speed == SPEED_10)
3015 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 switch (tg3_asic_rev(tp)) {
3022 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3031 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3040 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 switch (tg3_asic_rev(tp)) {
3045 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3058 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3062 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3063 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3064 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3068 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3069 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3074 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076 val = tr32(GRC_MISC_CFG);
3077 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 tg3_writephy(tp, MII_ADVERTISE, 0);
3086 tg3_writephy(tp, MII_BMCR,
3087 BMCR_ANENABLE | BMCR_ANRESTART);
3089 tg3_writephy(tp, MII_TG3_FET_TEST,
3090 phytest | MII_TG3_FET_SHADOW_EN);
3091 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3092 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3093 tg3_writephy(tp,
3094 MII_TG3_FET_SHDW_AUXMODE4,
3095 phy);
3097 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 } else if (do_low_power) {
3101 if (!tg3_phy_led_bug(tp))
3102 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3103 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3106 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3107 MII_TG3_AUXCTL_PCTL_VREG_11V;
3108 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 /* The PHY should not be powered down on some chips because
3112 * of bugs.
3114 if (tg3_phy_power_bug(tp))
3117 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3118 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3119 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3120 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3121 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3122 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 /* tp->lock is held. */
3129 static int tg3_nvram_lock(struct tg3 *tp)
3131 if (tg3_flag(tp, NVRAM)) {
3134 if (tp->nvram_lock_cnt == 0) {
3135 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3136 for (i = 0; i < 8000; i++) {
3137 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3142 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3146 tp->nvram_lock_cnt++;
3151 /* tp->lock is held. */
3152 static void tg3_nvram_unlock(struct tg3 *tp)
3154 if (tg3_flag(tp, NVRAM)) {
3155 if (tp->nvram_lock_cnt > 0)
3156 tp->nvram_lock_cnt--;
3157 if (tp->nvram_lock_cnt == 0)
3158 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3162 /* tp->lock is held. */
3163 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3166 u32 nvaccess = tr32(NVRAM_ACCESS);
3168 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3172 /* tp->lock is held. */
3173 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3176 u32 nvaccess = tr32(NVRAM_ACCESS);
3178 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3183 u32 offset, u32 *val)
3188 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3192 EEPROM_ADDR_DEVID_MASK |
3194 tw32(GRC_EEPROM_ADDR,
3196 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3197 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3198 EEPROM_ADDR_ADDR_MASK) |
3199 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201 for (i = 0; i < 1000; i++) {
3202 tmp = tr32(GRC_EEPROM_ADDR);
3204 if (tmp & EEPROM_ADDR_COMPLETE)
3208 if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 tmp = tr32(GRC_EEPROM_DATA);
3214 * The data will always be opposite the native endian
3215 * format. Perform a blind byteswap to compensate.
3217 *val = swab32(tmp);
3222 #define NVRAM_CMD_TIMEOUT 10000
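/* With usleep_range(10, 40) per iteration in tg3_nvram_exec_cmd()
 * below, a full timeout works out to roughly 10000 * 10-40 us, i.e.
 * about 100-400 ms before the command is declared stuck.
 */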
3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3228 tw32(NVRAM_CMD, nvram_cmd);
3229 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3230 usleep_range(10, 40);
3231 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237 if (i == NVRAM_CMD_TIMEOUT)
3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 if (tg3_flag(tp, NVRAM) &&
3246 tg3_flag(tp, NVRAM_BUFFERED) &&
3247 tg3_flag(tp, FLASH) &&
3248 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3249 (tp->nvram_jedecnum == JEDEC_ATMEL))
3251 addr = ((addr / tp->nvram_pagesize) <<
3252 ATMEL_AT45DB0X1B_PAGE_POS) +
3253 (addr % tp->nvram_pagesize);
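/* Worked example, assuming the Atmel AT45DB0x1B geometry this
 * translation targets (nvram_pagesize = 264 bytes,
 * ATMEL_AT45DB0X1B_PAGE_POS = 9): linear offset 1000 falls on page 3
 * at byte 208, so the physical address is (3 << 9) + 208 = 0x6d0.
 */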
3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 if (tg3_flag(tp, NVRAM) &&
3261 tg3_flag(tp, NVRAM_BUFFERED) &&
3262 tg3_flag(tp, FLASH) &&
3263 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3264 (tp->nvram_jedecnum == JEDEC_ATMEL))
3266 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3267 tp->nvram_pagesize) +
3268 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
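/* Inverse of tg3_nvram_phys_addr(). With the same assumed geometry,
 * physical 0x6d0 decodes to page 0x6d0 >> 9 = 3 and byte offset
 * 0x6d0 & 511 = 208, i.e. linear address 3 * 264 + 208 = 1000.
 */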
3273 /* NOTE: Data read in from NVRAM is byteswapped according to
3274 * the byteswapping settings for all other register accesses.
3275 * tg3 devices are BE devices, so on a BE machine, the data
3276 * returned will be exactly as it is seen in NVRAM. On a LE
3277 * machine, the 32-bit value will be byteswapped.
3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3283 if (!tg3_flag(tp, NVRAM))
3284 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286 offset = tg3_nvram_phys_addr(tp, offset);
3288 if (offset > NVRAM_ADDR_MSK)
3291 ret = tg3_nvram_lock(tp);
3295 tg3_enable_nvram_access(tp);
3297 tw32(NVRAM_ADDR, offset);
3298 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3299 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302 *val = tr32(NVRAM_RDDATA);
3304 tg3_disable_nvram_access(tp);
3306 tg3_nvram_unlock(tp);
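/* Example of the NOTE above: a word stored in NVRAM as 0x11223344 is
 * returned as 0x11223344 on a big-endian host but as 0x44332211 on a
 * little-endian one; callers that need bytestream order should use
 * tg3_nvram_read_be32() below instead.
 */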
3311 /* Ensures NVRAM data is in bytestream format. */
3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 int res = tg3_nvram_read(tp, offset, &v);
3317 *val = cpu_to_be32(v);
3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3322 u32 offset, u32 len, u8 *buf)
3327 for (i = 0; i < len; i += 4) {
3333 memcpy(&data, buf + i, 4);
3336 * The SEEPROM interface expects the data to always be opposite
3337 * the native endian format. We accomplish this by reversing
3338 * all the operations that would have been performed on the
3339 * data from a call to tg3_nvram_read_be32().
3341 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343 val = tr32(GRC_EEPROM_ADDR);
3344 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348 tw32(GRC_EEPROM_ADDR, val |
3349 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3350 (addr & EEPROM_ADDR_ADDR_MASK) |
3354 for (j = 0; j < 1000; j++) {
3355 val = tr32(GRC_EEPROM_ADDR);
3357 if (val & EEPROM_ADDR_COMPLETE)
3361 if (!(val & EEPROM_ADDR_COMPLETE)) {
3370 /* offset and length are dword aligned */
3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3375 u32 pagesize = tp->nvram_pagesize;
3376 u32 pagemask = pagesize - 1;
3380 tmp = kmalloc(pagesize, GFP_KERNEL);
3386 u32 phy_addr, page_off, size;
3388 phy_addr = offset & ~pagemask;
3390 for (j = 0; j < pagesize; j += 4) {
3391 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3392 (__be32 *) (tmp + j));
3399 page_off = offset & pagemask;
3406 memcpy(tmp + page_off, buf, size);
3408 offset = offset + (pagesize - page_off);
3410 tg3_enable_nvram_access(tp);
3413 * Before we can erase the flash page, we need
3414 * to issue a special "write enable" command.
3416 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 /* Erase the target page */
3422 tw32(NVRAM_ADDR, phy_addr);
3424 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3425 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 /* Issue another write enable to start the write. */
3431 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 for (j = 0; j < pagesize; j += 4) {
3439 data = *((__be32 *) (tmp + j));
3441 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443 tw32(NVRAM_ADDR, phy_addr + j);
3445 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3449 nvram_cmd |= NVRAM_CMD_FIRST;
3450 else if (j == (pagesize - 4))
3451 nvram_cmd |= NVRAM_CMD_LAST;
3453 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3462 tg3_nvram_exec_cmd(tp, nvram_cmd);
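/* Summary of the unbuffered path above: each affected flash page is
 * read into a bounce buffer, patched with the caller's data, erased
 * (WREN then ERASE), and rewritten one 32-bit word at a time with
 * NVRAM_CMD_FIRST/LAST framing, after which write access is dropped
 * again with WRDI.
 */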
3469 /* offset and length are dword aligned */
3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3475 for (i = 0; i < len; i += 4, offset += 4) {
3476 u32 page_off, phy_addr, nvram_cmd;
3479 memcpy(&data, buf + i, 4);
3480 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482 page_off = offset % tp->nvram_pagesize;
3484 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488 if (page_off == 0 || i == 0)
3489 nvram_cmd |= NVRAM_CMD_FIRST;
3490 if (page_off == (tp->nvram_pagesize - 4))
3491 nvram_cmd |= NVRAM_CMD_LAST;
3494 nvram_cmd |= NVRAM_CMD_LAST;
3496 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3497 !tg3_flag(tp, FLASH) ||
3498 !tg3_flag(tp, 57765_PLUS))
3499 tw32(NVRAM_ADDR, phy_addr);
3501 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3502 !tg3_flag(tp, 5755_PLUS) &&
3503 (tp->nvram_jedecnum == JEDEC_ST) &&
3504 (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3508 ret = tg3_nvram_exec_cmd(tp, cmd);
3512 if (!tg3_flag(tp, FLASH)) {
3513 /* We always do complete word writes to eeprom. */
3514 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3524 /* offset and length are dword aligned */
3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3529 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3530 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3531 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3535 if (!tg3_flag(tp, NVRAM)) {
3536 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3540 ret = tg3_nvram_lock(tp);
3544 tg3_enable_nvram_access(tp);
3545 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3546 tw32(NVRAM_WRITE1, 0x406);
3548 grc_mode = tr32(GRC_MODE);
3549 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3552 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3559 grc_mode = tr32(GRC_MODE);
3560 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562 tg3_disable_nvram_access(tp);
3563 tg3_nvram_unlock(tp);
3566 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3567 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3574 #define RX_CPU_SCRATCH_BASE 0x30000
3575 #define RX_CPU_SCRATCH_SIZE 0x04000
3576 #define TX_CPU_SCRATCH_BASE 0x34000
3577 #define TX_CPU_SCRATCH_SIZE 0x04000
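/* Both scratch windows are 0x4000 (16 KiB) wide: RX CPU scratch spans
 * 0x30000-0x33fff and TX CPU scratch spans 0x34000-0x37fff.
 */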
3579 /* tp->lock is held. */
3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 const int iters = 10000;
3585 for (i = 0; i < iters; i++) {
3586 tw32(cpu_base + CPU_STATE, 0xffffffff);
3587 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3588 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590 if (pci_channel_offline(tp->pdev))
3594 return (i == iters) ? -EBUSY : 0;
3597 /* tp->lock is held. */
3598 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3603 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3609 /* tp->lock is held. */
3610 static int tg3_txcpu_pause(struct tg3 *tp)
3612 return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 /* tp->lock is held. */
3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 tw32(cpu_base + CPU_STATE, 0xffffffff);
3619 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3622 /* tp->lock is held. */
3623 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 tg3_resume_cpu(tp, RX_CPU_BASE);
3628 /* tp->lock is held. */
3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3633 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3636 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 if (cpu_base == RX_CPU_BASE) {
3642 rc = tg3_rxcpu_pause(tp);
3645 * There is only an Rx CPU for the 5750 derivative in the
3646 * BCM4785.
3648 if (tg3_flag(tp, IS_SSB_CORE))
3651 rc = tg3_txcpu_pause(tp);
3655 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3656 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3660 /* Clear firmware's nvram arbitration. */
3661 if (tg3_flag(tp, NVRAM))
3662 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3666 static int tg3_fw_data_len(struct tg3 *tp,
3667 const struct tg3_firmware_hdr *fw_hdr)
3671 /* Non-fragmented firmware has one firmware header followed by a
3672 * contiguous chunk of data to be written. The length field in that
3673 * header is not the length of the data to be written but the complete
3674 * length of the bss. The data length is therefore determined from
3675 * tp->fw->size minus the header.
3677 * Fragmented firmware has a main header followed by multiple
3678 * fragments. Each fragment is identical to non-fragmented firmware,
3679 * with a firmware header followed by a contiguous chunk of data. In
3680 * the main header, the length field is unused and set to 0xffffffff.
3681 * In each fragment header the length is the entire size of that
3682 * fragment, i.e. fragment data + header length. The data length is
3683 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3685 if (tp->fw_len == 0xffffffff)
3686 fw_len = be32_to_cpu(fw_hdr->len);
3688 fw_len = tp->fw->size;
3690 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
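/* Example, assuming the 12-byte struct tg3_firmware_hdr (version,
 * base_addr, len): a non-fragmented blob with tp->fw->size == 2060
 * bytes yields (2060 - 12) / 4 = 512 data words to write.
 */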
3693 /* tp->lock is held. */
3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3695 u32 cpu_scratch_base, int cpu_scratch_size,
3696 const struct tg3_firmware_hdr *fw_hdr)
3699 void (*write_op)(struct tg3 *, u32, u32);
3700 int total_len = tp->fw->size;
3702 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 "%s: Trying to load TX cpu firmware which is 5705\n",
3709 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3710 write_op = tg3_write_mem;
3712 write_op = tg3_write_indirect_reg32;
3714 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3715 /* It is possible that bootcode is still loading at this point.
3716 * Get the nvram lock first before halting the cpu.
3718 int lock_err = tg3_nvram_lock(tp);
3719 err = tg3_halt_cpu(tp, cpu_base);
3721 tg3_nvram_unlock(tp);
3725 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3726 write_op(tp, cpu_scratch_base + i, 0);
3727 tw32(cpu_base + CPU_STATE, 0xffffffff);
3728 tw32(cpu_base + CPU_MODE,
3729 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 /* Subtract additional main header for fragmented firmware and
3732 * advance to the first fragment
3734 total_len -= TG3_FW_HDR_LEN;
3739 u32 *fw_data = (u32 *)(fw_hdr + 1);
3740 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3741 write_op(tp, cpu_scratch_base +
3742 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 be32_to_cpu(fw_data[i]));
3746 total_len -= be32_to_cpu(fw_hdr->len);
3748 /* Advance to next fragment */
3749 fw_hdr = (struct tg3_firmware_hdr *)
3750 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3751 } while (total_len > 0);
3759 /* tp->lock is held. */
3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 const int iters = 5;
3765 tw32(cpu_base + CPU_STATE, 0xffffffff);
3766 tw32_f(cpu_base + CPU_PC, pc);
3768 for (i = 0; i < iters; i++) {
3769 if (tr32(cpu_base + CPU_PC) == pc)
3771 tw32(cpu_base + CPU_STATE, 0xffffffff);
3772 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3773 tw32_f(cpu_base + CPU_PC, pc);
3777 return (i == iters) ? -EBUSY : 0;
3780 /* tp->lock is held. */
3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 const struct tg3_firmware_hdr *fw_hdr;
3786 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788 /* Firmware blob starts with version numbers, followed by
3789 * start address and length. The length field covers the complete
3790 * region: length = end_address_of_bss - start_address_of_text.
3791 * The remainder is the blob to be loaded contiguously
3792 * from the start address. */
3794 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3795 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3800 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3801 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3806 /* Now startup only the RX cpu. */
3807 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3808 be32_to_cpu(fw_hdr->base_addr));
3810 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3811 "should be %08x\n", __func__,
3812 tr32(RX_CPU_BASE + CPU_PC),
3813 be32_to_cpu(fw_hdr->base_addr));
3817 tg3_rxcpu_resume(tp);
3822 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 const int iters = 1000;
3828 /* Wait for the boot code to complete initialization and enter the
3829 * service loop. It is then safe to download service patches.
3831 for (i = 0; i < iters; i++) {
3832 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3839 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3843 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 netdev_warn(tp->dev,
3846 "Other patches exist. Not downloading EEE patch\n");
3853 /* tp->lock is held. */
3854 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 struct tg3_firmware_hdr *fw_hdr;
3858 if (!tg3_flag(tp, NO_NVRAM))
3861 if (tg3_validate_rxcpu_state(tp))
3867 /* This firmware blob has a different format than older firmware
3868 * releases as given below. The main difference is we have fragmented
3869 * data to be written to non-contiguous locations.
3871 * In the beginning we have a firmware header identical to other
3872 * firmware which consists of version, base addr and length. The length
3873 * here is unused and set to 0xffffffff.
3875 * This is followed by a series of firmware fragments which are
3876 * individually identical to previous firmware. i.e. they have the
3877 * firmware header and followed by data for that fragment. The version
3878 * field of the individual fragment header is unused.
3881 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3882 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 if (tg3_rxcpu_pause(tp))
3888 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3889 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891 tg3_rxcpu_resume(tp);
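/* Rough layout of the 57766 patch blob described above, with fields
 * as in struct tg3_firmware_hdr:
 *
 *   [main hdr: version | base = TG3_57766_FW_BASE_ADDR | len = 0xffffffff]
 *   [frag hdr: version (unused) | base_addr | len = header + data]
 *   [fragment data words ...]
 *   [frag hdr ...][fragment data words ...] until tp->fw->size is consumed.
 */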
3894 /* tp->lock is held. */
3895 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 const struct tg3_firmware_hdr *fw_hdr;
3898 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 if (!tg3_flag(tp, FW_TSO))
3904 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906 /* Firmware blob starts with version numbers, followed by
3907 * start address and length. The length field covers the complete
3908 * region: length = end_address_of_bss - start_address_of_text.
3909 * The remainder is the blob to be loaded contiguously
3910 * from the start address. */
3912 cpu_scratch_size = tp->fw_len;
3914 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3915 cpu_base = RX_CPU_BASE;
3916 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 cpu_base = TX_CPU_BASE;
3919 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3920 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 err = tg3_load_firmware_cpu(tp, cpu_base,
3924 cpu_scratch_base, cpu_scratch_size,
3929 /* Now startup the cpu. */
3930 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3931 be32_to_cpu(fw_hdr->base_addr));
3934 "%s fails to set CPU PC, is %08x should be %08x\n",
3935 __func__, tr32(cpu_base + CPU_PC),
3936 be32_to_cpu(fw_hdr->base_addr));
3940 tg3_resume_cpu(tp, cpu_base);
3944 /* tp->lock is held. */
3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948 u32 addr_high, addr_low;
3950 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3951 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3952 (mac_addr[4] << 8) | mac_addr[5]);
3955 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3956 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3960 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
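/* Example: for MAC address 00:10:18:aa:bb:cc the hardware is given
 * addr_high = 0x00000010 (bytes 0-1) and addr_low = 0x18aabbcc
 * (bytes 2-5).
 */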
3964 /* tp->lock is held. */
3965 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3970 for (i = 0; i < 4; i++) {
3971 if (i == 1 && skip_mac_1)
3973 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3977 tg3_asic_rev(tp) == ASIC_REV_5704) {
3978 for (i = 4; i < 16; i++)
3979 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 addr_high = (tp->dev->dev_addr[0] +
3983 tp->dev->dev_addr[1] +
3984 tp->dev->dev_addr[2] +
3985 tp->dev->dev_addr[3] +
3986 tp->dev->dev_addr[4] +
3987 tp->dev->dev_addr[5]) &
3988 TX_BACKOFF_SEED_MASK;
3989 tw32(MAC_TX_BACKOFF_SEED, addr_high);
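/* The backoff seed is simply the sum of the six address bytes masked
 * with TX_BACKOFF_SEED_MASK; for 00:10:18:aa:bb:cc the sum is 0x259,
 * so hosts with different MACs pick different retransmit backoffs.
 */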
3992 static void tg3_enable_register_access(struct tg3 *tp)
3995 * Make sure register accesses (indirect or otherwise) will function
3998 pci_write_config_dword(tp->pdev,
3999 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 static int tg3_power_up(struct tg3 *tp)
4006 tg3_enable_register_access(tp);
4008 err = pci_set_power_state(tp->pdev, PCI_D0);
4010 /* Switch out of Vaux if it is a NIC */
4011 tg3_pwrsrc_switch_to_vmain(tp);
4013 netdev_err(tp->dev, "Transition to D0 failed\n");
4019 static int tg3_setup_phy(struct tg3 *, bool);
4021 static int tg3_power_down_prepare(struct tg3 *tp)
4024 bool device_should_wake, do_low_power;
4026 tg3_enable_register_access(tp);
4028 /* Restore the CLKREQ setting. */
4029 if (tg3_flag(tp, CLKREQ_BUG))
4030 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4031 PCI_EXP_LNKCTL_CLKREQ_EN);
4033 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4034 tw32(TG3PCI_MISC_HOST_CTRL,
4035 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4038 tg3_flag(tp, WOL_ENABLE);
4040 if (tg3_flag(tp, USE_PHYLIB)) {
4041 do_low_power = false;
4042 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4043 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4044 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4045 struct phy_device *phydev;
4048 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052 tp->link_config.speed = phydev->speed;
4053 tp->link_config.duplex = phydev->duplex;
4054 tp->link_config.autoneg = phydev->autoneg;
4055 ethtool_convert_link_mode_to_legacy_u32(
4056 &tp->link_config.advertising,
4057 phydev->advertising);
4059 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4068 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4069 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4081 linkmode_copy(phydev->advertising, advertising);
4082 phy_start_aneg(phydev);
4084 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4085 if (phyid != PHY_ID_BCMAC131) {
4086 phyid &= PHY_BCM_OUI_MASK;
4087 if (phyid == PHY_BCM_OUI_1 ||
4088 phyid == PHY_BCM_OUI_2 ||
4089 phyid == PHY_BCM_OUI_3)
4090 do_low_power = true;
4094 do_low_power = true;
4096 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4097 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4100 tg3_setup_phy(tp, false);
4103 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 val = tr32(GRC_VCPU_EXT_CTRL);
4107 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4108 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4112 for (i = 0; i < 200; i++) {
4113 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4114 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4119 if (tg3_flag(tp, WOL_CAP))
4120 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4121 WOL_DRV_STATE_SHUTDOWN |
4125 if (device_should_wake) {
4128 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4129 if (do_low_power &&
4130 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4131 tg3_phy_auxctl_write(tp,
4132 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4133 MII_TG3_AUXCTL_PCTL_WOL_EN |
4134 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4135 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4139 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4140 mac_mode = MAC_MODE_PORT_MODE_GMII;
4141 else if (tp->phy_flags &
4142 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4143 if (tp->link_config.active_speed == SPEED_1000)
4144 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 mac_mode = MAC_MODE_PORT_MODE_MII;
4150 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4151 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4152 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4153 SPEED_100 : SPEED_10;
4154 if (tg3_5700_link_polarity(tp, speed))
4155 mac_mode |= MAC_MODE_LINK_POLARITY;
4157 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160 mac_mode = MAC_MODE_PORT_MODE_TBI;
4163 if (!tg3_flag(tp, 5750_PLUS))
4164 tw32(MAC_LED_CTRL, tp->led_ctrl);
4166 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4167 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4168 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4169 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171 if (tg3_flag(tp, ENABLE_APE))
4172 mac_mode |= MAC_MODE_APE_TX_EN |
4173 MAC_MODE_APE_RX_EN |
4174 MAC_MODE_TDE_ENABLE;
4176 tw32_f(MAC_MODE, mac_mode);
4179 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4183 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4184 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4185 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188 base_val = tp->pci_clock_ctrl;
4189 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4190 CLOCK_CTRL_TXCLK_DISABLE);
4192 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4193 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4194 } else if (tg3_flag(tp, 5780_CLASS) ||
4195 tg3_flag(tp, CPMU_PRESENT) ||
4196 tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4199 u32 newbits1, newbits2;
4201 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4202 tg3_asic_rev(tp) == ASIC_REV_5701) {
4203 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4204 CLOCK_CTRL_TXCLK_DISABLE |
4206 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4207 } else if (tg3_flag(tp, 5705_PLUS)) {
4208 newbits1 = CLOCK_CTRL_625_CORE;
4209 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 newbits1 = CLOCK_CTRL_ALTCLK;
4212 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221 if (!tg3_flag(tp, 5705_PLUS)) {
4224 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4225 tg3_asic_rev(tp) == ASIC_REV_5701) {
4226 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4227 CLOCK_CTRL_TXCLK_DISABLE |
4228 CLOCK_CTRL_44MHZ_CORE);
4230 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4234 tp->pci_clock_ctrl | newbits3, 40);
4238 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4239 tg3_power_down_phy(tp, do_low_power);
4241 tg3_frob_aux_power(tp, true);
4243 /* Workaround for unstable PLL clock */
4244 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4245 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4246 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4247 u32 val = tr32(0x7d00);
4249 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 if (!tg3_flag(tp, ENABLE_ASF)) {
4254 err = tg3_nvram_lock(tp);
4255 tg3_halt_cpu(tp, RX_CPU_BASE);
4257 tg3_nvram_unlock(tp);
4261 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4268 static void tg3_power_down(struct tg3 *tp)
4270 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4271 pci_set_power_state(tp->pdev, PCI_D3hot);
4274 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4277 case MII_TG3_AUX_STAT_10HALF:
4279 *duplex = DUPLEX_HALF;
4282 case MII_TG3_AUX_STAT_10FULL:
4284 *duplex = DUPLEX_FULL;
4287 case MII_TG3_AUX_STAT_100HALF:
4289 *duplex = DUPLEX_HALF;
4292 case MII_TG3_AUX_STAT_100FULL:
4294 *duplex = DUPLEX_FULL;
4297 case MII_TG3_AUX_STAT_1000HALF:
4298 *speed = SPEED_1000;
4299 *duplex = DUPLEX_HALF;
4302 case MII_TG3_AUX_STAT_1000FULL:
4303 *speed = SPEED_1000;
4304 *duplex = DUPLEX_FULL;
4308 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4309 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4315 *speed = SPEED_UNKNOWN;
4316 *duplex = DUPLEX_UNKNOWN;
4321 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4326 new_adv = ADVERTISE_CSMA;
4327 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4328 new_adv |= mii_advertise_flowctrl(flowctrl);
4330 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4334 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4335 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4338 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4339 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349 tw32(TG3_CPMU_EEE_MODE,
4350 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4357 /* Advertise 100-BaseTX EEE ability */
4358 if (advertise & ADVERTISED_100baseT_Full)
4359 val |= MDIO_AN_EEE_ADV_100TX;
4360 /* Advertise 1000-BaseT EEE ability */
4361 if (advertise & ADVERTISED_1000baseT_Full)
4362 val |= MDIO_AN_EEE_ADV_1000T;
4364 if (!tp->eee.eee_enabled) {
4366 tp->eee.advertised = 0;
4368 tp->eee.advertised = advertise &
4369 (ADVERTISED_100baseT_Full |
4370 ADVERTISED_1000baseT_Full);
4373 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4377 switch (tg3_asic_rev(tp)) {
4379 case ASIC_REV_57765:
4380 case ASIC_REV_57766:
4382 /* If we advertised any eee advertisements above... */
4384 val = MII_TG3_DSP_TAP26_ALNOKO |
4385 MII_TG3_DSP_TAP26_RMRXSTO |
4386 MII_TG3_DSP_TAP26_OPCSINPT;
4387 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4391 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4392 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4393 MII_TG3_DSP_CH34TP2_HIBW01);
4396 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4405 static void tg3_phy_copper_begin(struct tg3 *tp)
4407 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4408 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4411 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4412 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4413 adv = ADVERTISED_10baseT_Half |
4414 ADVERTISED_10baseT_Full;
4415 if (tg3_flag(tp, WOL_SPEED_100MB))
4416 adv |= ADVERTISED_100baseT_Half |
4417 ADVERTISED_100baseT_Full;
4418 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4419 if (!(tp->phy_flags &
4420 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4421 adv |= ADVERTISED_1000baseT_Half;
4422 adv |= ADVERTISED_1000baseT_Full;
4425 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4427 adv = tp->link_config.advertising;
4428 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4429 adv &= ~(ADVERTISED_1000baseT_Half |
4430 ADVERTISED_1000baseT_Full);
4432 fc = tp->link_config.flowctrl;
4435 tg3_phy_autoneg_cfg(tp, adv, fc);
4437 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4438 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4439 /* Normally during power down we want to autonegotiate
4440 * the lowest possible speed for WOL. However, to avoid
4441 * link flap, we leave it untouched.
4446 tg3_writephy(tp, MII_BMCR,
4447 BMCR_ANENABLE | BMCR_ANRESTART);
4450 u32 bmcr, orig_bmcr;
4452 tp->link_config.active_speed = tp->link_config.speed;
4453 tp->link_config.active_duplex = tp->link_config.duplex;
4455 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4456 /* With autoneg disabled, 5715 only links up when the
4457 * advertisement register has the configured speed
4458 * enabled.
4460 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4464 switch (tp->link_config.speed) {
4470 bmcr |= BMCR_SPEED100;
4474 bmcr |= BMCR_SPEED1000;
4478 if (tp->link_config.duplex == DUPLEX_FULL)
4479 bmcr |= BMCR_FULLDPLX;
4481 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4482 (bmcr != orig_bmcr)) {
4483 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4484 for (i = 0; i < 1500; i++) {
4488 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4489 tg3_readphy(tp, MII_BMSR, &tmp))
4491 if (!(tmp & BMSR_LSTATUS)) {
4496 tg3_writephy(tp, MII_BMCR, bmcr);
4502 static int tg3_phy_pull_config(struct tg3 *tp)
4507 err = tg3_readphy(tp, MII_BMCR, &val);
4511 if (!(val & BMCR_ANENABLE)) {
4512 tp->link_config.autoneg = AUTONEG_DISABLE;
4513 tp->link_config.advertising = 0;
4514 tg3_flag_clear(tp, PAUSE_AUTONEG);
4518 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4520 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4523 tp->link_config.speed = SPEED_10;
4526 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4529 tp->link_config.speed = SPEED_100;
4531 case BMCR_SPEED1000:
4532 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4533 tp->link_config.speed = SPEED_1000;
4541 if (val & BMCR_FULLDPLX)
4542 tp->link_config.duplex = DUPLEX_FULL;
4544 tp->link_config.duplex = DUPLEX_HALF;
4546 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4552 tp->link_config.autoneg = AUTONEG_ENABLE;
4553 tp->link_config.advertising = ADVERTISED_Autoneg;
4554 tg3_flag_set(tp, PAUSE_AUTONEG);
4556 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4559 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4563 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4564 tp->link_config.advertising |= adv | ADVERTISED_TP;
4566 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4568 tp->link_config.advertising |= ADVERTISED_FIBRE;
4571 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4574 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4575 err = tg3_readphy(tp, MII_CTRL1000, &val);
4579 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4581 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4585 adv = tg3_decode_flowctrl_1000X(val);
4586 tp->link_config.flowctrl = adv;
4588 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4589 adv = mii_adv_to_ethtool_adv_x(val);
4592 tp->link_config.advertising |= adv;
4599 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4603 /* Turn off tap power management. */
4604 /* Set Extended packet length bit */
4605 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4607 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4608 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4609 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4610 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4611 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4618 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4620 struct ethtool_eee eee;
4622 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4625 tg3_eee_pull_config(tp, &eee);
4627 if (tp->eee.eee_enabled) {
4628 if (tp->eee.advertised != eee.advertised ||
4629 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4630 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4633 /* EEE is disabled but we're advertising */
4641 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4643 u32 advmsk, tgtadv, advertising;
4645 advertising = tp->link_config.advertising;
4646 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4648 advmsk = ADVERTISE_ALL;
4649 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4650 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4651 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4654 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4657 if ((*lcladv & advmsk) != tgtadv)
4660 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4663 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4665 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4669 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4670 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4671 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4672 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4673 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4675 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4678 if (tg3_ctrl != tgtadv)
4685 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4689 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4692 if (tg3_readphy(tp, MII_STAT1000, &val))
4695 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4698 if (tg3_readphy(tp, MII_LPA, rmtadv))
4701 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4702 tp->link_config.rmt_adv = lpeth;
4707 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4709 if (curr_link_up != tp->link_up) {
4711 netif_carrier_on(tp->dev);
4713 netif_carrier_off(tp->dev);
4714 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4715 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4718 tg3_link_report(tp);
4725 static void tg3_clear_mac_status(struct tg3 *tp)
4730 MAC_STATUS_SYNC_CHANGED |
4731 MAC_STATUS_CFG_CHANGED |
4732 MAC_STATUS_MI_COMPLETION |
4733 MAC_STATUS_LNKSTATE_CHANGED);
4737 static void tg3_setup_eee(struct tg3 *tp)
4741 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4742 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4743 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4744 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4746 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4748 tw32_f(TG3_CPMU_EEE_CTRL,
4749 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4751 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4752 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4753 TG3_CPMU_EEEMD_LPI_IN_RX |
4754 TG3_CPMU_EEEMD_EEE_ENABLE;
4756 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4757 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4759 if (tg3_flag(tp, ENABLE_APE))
4760 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4762 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4764 tw32_f(TG3_CPMU_EEE_DBTMR1,
4765 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4766 (tp->eee.tx_lpi_timer & 0xffff));
4768 tw32_f(TG3_CPMU_EEE_DBTMR2,
4769 TG3_CPMU_DBTMR2_APE_TX_2047US |
4770 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4773 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4775 bool current_link_up;
4777 u32 lcl_adv, rmt_adv;
4782 tg3_clear_mac_status(tp);
4784 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4785 tw32_f(MAC_MI_MODE,
4786 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4790 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4792 /* Some third-party PHYs need to be reset on link going
4793 * down.
4795 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4796 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4797 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4799 tg3_readphy(tp, MII_BMSR, &bmsr);
4800 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4801 !(bmsr & BMSR_LSTATUS))
4807 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4808 tg3_readphy(tp, MII_BMSR, &bmsr);
4809 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4810 !tg3_flag(tp, INIT_COMPLETE))
4813 if (!(bmsr & BMSR_LSTATUS)) {
4814 err = tg3_init_5401phy_dsp(tp);
4818 tg3_readphy(tp, MII_BMSR, &bmsr);
4819 for (i = 0; i < 1000; i++) {
4821 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4822 (bmsr & BMSR_LSTATUS)) {
4828 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4829 TG3_PHY_REV_BCM5401_B0 &&
4830 !(bmsr & BMSR_LSTATUS) &&
4831 tp->link_config.active_speed == SPEED_1000) {
4832 err = tg3_phy_reset(tp);
4834 err = tg3_init_5401phy_dsp(tp);
4839 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4840 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4841 /* 5701 {A0,B0} CRC bug workaround */
4842 tg3_writephy(tp, 0x15, 0x0a75);
4843 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4844 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4845 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848 /* Clear pending interrupts... */
4849 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4853 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4854 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4855 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4857 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4858 tg3_asic_rev(tp) == ASIC_REV_5701) {
4859 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4860 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4861 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4863 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4866 current_link_up = false;
4867 current_speed = SPEED_UNKNOWN;
4868 current_duplex = DUPLEX_UNKNOWN;
4869 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4870 tp->link_config.rmt_adv = 0;
4872 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4873 err = tg3_phy_auxctl_read(tp,
4874 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4876 if (!err && !(val & (1 << 10))) {
4877 tg3_phy_auxctl_write(tp,
4878 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4885 for (i = 0; i < 100; i++) {
4886 tg3_readphy(tp, MII_BMSR, &bmsr);
4887 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4888 (bmsr & BMSR_LSTATUS))
4893 if (bmsr & BMSR_LSTATUS) {
4896 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4897 for (i = 0; i < 2000; i++) {
4899 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4904 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4909 for (i = 0; i < 200; i++) {
4910 tg3_readphy(tp, MII_BMCR, &bmcr);
4911 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4913 if (bmcr && bmcr != 0x7fff)
4921 tp->link_config.active_speed = current_speed;
4922 tp->link_config.active_duplex = current_duplex;
4924 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4925 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4927 if ((bmcr & BMCR_ANENABLE) &&
4929 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4930 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4931 current_link_up = true;
4933 /* EEE settings changes take effect only after a phy
4934 * reset. If we have skipped a reset due to Link Flap
4935 * Avoidance being enabled, do it now. */
4937 if (!eee_config_ok &&
4938 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4944 if (!(bmcr & BMCR_ANENABLE) &&
4945 tp->link_config.speed == current_speed &&
4946 tp->link_config.duplex == current_duplex) {
4947 current_link_up = true;
4951 if (current_link_up &&
4952 tp->link_config.active_duplex == DUPLEX_FULL) {
4955 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4956 reg = MII_TG3_FET_GEN_STAT;
4957 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4959 reg = MII_TG3_EXT_STAT;
4960 bit = MII_TG3_EXT_STAT_MDIX;
4963 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4964 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4966 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4971 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4972 tg3_phy_copper_begin(tp);
4974 if (tg3_flag(tp, ROBOSWITCH)) {
4975 current_link_up = true;
4976 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4977 current_speed = SPEED_1000;
4978 current_duplex = DUPLEX_FULL;
4979 tp->link_config.active_speed = current_speed;
4980 tp->link_config.active_duplex = current_duplex;
4983 tg3_readphy(tp, MII_BMSR, &bmsr);
4984 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4985 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4986 current_link_up = true;
4989 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4990 if (current_link_up) {
4991 if (tp->link_config.active_speed == SPEED_100 ||
4992 tp->link_config.active_speed == SPEED_10)
4993 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4995 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4996 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4997 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001 /* In order for the 5750 core in the BCM4785 chip to work properly
5002 * in RGMII mode, the LED Control Register must be set up. */
5004 if (tg3_flag(tp, RGMII_MODE)) {
5005 u32 led_ctrl = tr32(MAC_LED_CTRL);
5006 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5008 if (tp->link_config.active_speed == SPEED_10)
5009 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5010 else if (tp->link_config.active_speed == SPEED_100)
5011 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5012 LED_CTRL_100MBPS_ON);
5013 else if (tp->link_config.active_speed == SPEED_1000)
5014 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5015 LED_CTRL_1000MBPS_ON);
5017 tw32(MAC_LED_CTRL, led_ctrl);
5021 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5022 if (tp->link_config.active_duplex == DUPLEX_HALF)
5023 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5025 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5026 if (current_link_up &&
5027 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5028 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5030 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5033 /* ??? Without this setting the Netgear GA302T PHY does not
5034 * ??? send/receive packets... */
5036 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5037 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5038 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5039 tw32_f(MAC_MI_MODE, tp->mi_mode);
5043 tw32_f(MAC_MODE, tp->mac_mode);
5046 tg3_phy_eee_adjust(tp, current_link_up);
5048 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5049 /* Polled via timer. */
5050 tw32_f(MAC_EVENT, 0);
5052 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5056 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5058 tp->link_config.active_speed == SPEED_1000 &&
5059 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5062 (MAC_STATUS_SYNC_CHANGED |
5063 MAC_STATUS_CFG_CHANGED));
5066 NIC_SRAM_FIRMWARE_MBOX,
5067 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5070 /* Prevent send BD corruption. */
5071 if (tg3_flag(tp, CLKREQ_BUG)) {
5072 if (tp->link_config.active_speed == SPEED_100 ||
5073 tp->link_config.active_speed == SPEED_10)
5074 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5075 PCI_EXP_LNKCTL_CLKREQ_EN);
5077 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5078 PCI_EXP_LNKCTL_CLKREQ_EN);
5081 tg3_test_and_report_link_chg(tp, current_link_up);
5086 struct tg3_fiber_aneginfo {
5088 #define ANEG_STATE_UNKNOWN 0
5089 #define ANEG_STATE_AN_ENABLE 1
5090 #define ANEG_STATE_RESTART_INIT 2
5091 #define ANEG_STATE_RESTART 3
5092 #define ANEG_STATE_DISABLE_LINK_OK 4
5093 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5094 #define ANEG_STATE_ABILITY_DETECT 6
5095 #define ANEG_STATE_ACK_DETECT_INIT 7
5096 #define ANEG_STATE_ACK_DETECT 8
5097 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5098 #define ANEG_STATE_COMPLETE_ACK 10
5099 #define ANEG_STATE_IDLE_DETECT_INIT 11
5100 #define ANEG_STATE_IDLE_DETECT 12
5101 #define ANEG_STATE_LINK_OK 13
5102 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5103 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5106 #define MR_AN_ENABLE 0x00000001
5107 #define MR_RESTART_AN 0x00000002
5108 #define MR_AN_COMPLETE 0x00000004
5109 #define MR_PAGE_RX 0x00000008
5110 #define MR_NP_LOADED 0x00000010
5111 #define MR_TOGGLE_TX 0x00000020
5112 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5113 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5114 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5115 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5116 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5117 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5118 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5119 #define MR_TOGGLE_RX 0x00002000
5120 #define MR_NP_RX 0x00004000
5122 #define MR_LINK_OK 0x80000000
5124 unsigned long link_time, cur_time;
5126 u32 ability_match_cfg;
5127 int ability_match_count;
5129 char ability_match, idle_match, ack_match;
5131 u32 txconfig, rxconfig;
5132 #define ANEG_CFG_NP 0x00000080
5133 #define ANEG_CFG_ACK 0x00000040
5134 #define ANEG_CFG_RF2 0x00000020
5135 #define ANEG_CFG_RF1 0x00000010
5136 #define ANEG_CFG_PS2 0x00000001
5137 #define ANEG_CFG_PS1 0x00008000
5138 #define ANEG_CFG_HD 0x00004000
5139 #define ANEG_CFG_FD 0x00002000
5140 #define ANEG_CFG_INVAL 0x00001f06
5145 #define ANEG_TIMER_ENAB 2
5146 #define ANEG_FAILED -1
5148 #define ANEG_STATE_SETTLE_TIME 10000
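/* Reading aid (summary of the state machine below, not from any
 * standard): AN_ENABLE restarts negotiation, RESTART(_INIT) transmits
 * zeroed config words, ABILITY_DETECT(_INIT) sends our ability word
 * until the partner's word has been seen stable twice,
 * ACK_DETECT(_INIT) adds the ACK bit, COMPLETE_ACK(_INIT) latches the
 * partner's abilities into the MR_LP_ADV_* flags, and
 * IDLE_DETECT(_INIT) waits out the settle time before declaring
 * LINK_OK.
 */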
5150 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5151 struct tg3_fiber_aneginfo *ap)
5154 unsigned long delta;
5158 if (ap->state == ANEG_STATE_UNKNOWN) {
5162 ap->ability_match_cfg = 0;
5163 ap->ability_match_count = 0;
5164 ap->ability_match = 0;
5170 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5171 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5173 if (rx_cfg_reg != ap->ability_match_cfg) {
5174 ap->ability_match_cfg = rx_cfg_reg;
5175 ap->ability_match = 0;
5176 ap->ability_match_count = 0;
5178 if (++ap->ability_match_count > 1) {
5179 ap->ability_match = 1;
5180 ap->ability_match_cfg = rx_cfg_reg;
5183 if (rx_cfg_reg & ANEG_CFG_ACK)
5191 ap->ability_match_cfg = 0;
5192 ap->ability_match_count = 0;
5193 ap->ability_match = 0;
5199 ap->rxconfig = rx_cfg_reg;
5202 switch (ap->state) {
5203 case ANEG_STATE_UNKNOWN:
5204 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5205 ap->state = ANEG_STATE_AN_ENABLE;
5208 case ANEG_STATE_AN_ENABLE:
5209 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5210 if (ap->flags & MR_AN_ENABLE) {
5213 ap->ability_match_cfg = 0;
5214 ap->ability_match_count = 0;
5215 ap->ability_match = 0;
5219 ap->state = ANEG_STATE_RESTART_INIT;
5221 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5225 case ANEG_STATE_RESTART_INIT:
5226 ap->link_time = ap->cur_time;
5227 ap->flags &= ~(MR_NP_LOADED);
5229 tw32(MAC_TX_AUTO_NEG, 0);
5230 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5231 tw32_f(MAC_MODE, tp->mac_mode);
5234 ret = ANEG_TIMER_ENAB;
5235 ap->state = ANEG_STATE_RESTART;
5238 case ANEG_STATE_RESTART:
5239 delta = ap->cur_time - ap->link_time;
5240 if (delta > ANEG_STATE_SETTLE_TIME)
5241 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5243 ret = ANEG_TIMER_ENAB;
5246 case ANEG_STATE_DISABLE_LINK_OK:
5250 case ANEG_STATE_ABILITY_DETECT_INIT:
5251 ap->flags &= ~(MR_TOGGLE_TX);
5252 ap->txconfig = ANEG_CFG_FD;
5253 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5254 if (flowctrl & ADVERTISE_1000XPAUSE)
5255 ap->txconfig |= ANEG_CFG_PS1;
5256 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5257 ap->txconfig |= ANEG_CFG_PS2;
5258 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5259 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5260 tw32_f(MAC_MODE, tp->mac_mode);
5263 ap->state = ANEG_STATE_ABILITY_DETECT;
5266 case ANEG_STATE_ABILITY_DETECT:
5267 if (ap->ability_match != 0 && ap->rxconfig != 0)
5268 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5271 case ANEG_STATE_ACK_DETECT_INIT:
5272 ap->txconfig |= ANEG_CFG_ACK;
5273 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5274 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5275 tw32_f(MAC_MODE, tp->mac_mode);
5278 ap->state = ANEG_STATE_ACK_DETECT;
5281 case ANEG_STATE_ACK_DETECT:
5282 if (ap->ack_match != 0) {
5283 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5284 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5285 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5287 ap->state = ANEG_STATE_AN_ENABLE;
5289 } else if (ap->ability_match != 0 &&
5290 ap->rxconfig == 0) {
5291 ap->state = ANEG_STATE_AN_ENABLE;
5295 case ANEG_STATE_COMPLETE_ACK_INIT:
5296 if (ap->rxconfig & ANEG_CFG_INVAL) {
5300 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5301 MR_LP_ADV_HALF_DUPLEX |
5302 MR_LP_ADV_SYM_PAUSE |
5303 MR_LP_ADV_ASYM_PAUSE |
5304 MR_LP_ADV_REMOTE_FAULT1 |
5305 MR_LP_ADV_REMOTE_FAULT2 |
5306 MR_LP_ADV_NEXT_PAGE |
5309 if (ap->rxconfig & ANEG_CFG_FD)
5310 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5311 if (ap->rxconfig & ANEG_CFG_HD)
5312 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5313 if (ap->rxconfig & ANEG_CFG_PS1)
5314 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5315 if (ap->rxconfig & ANEG_CFG_PS2)
5316 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5317 if (ap->rxconfig & ANEG_CFG_RF1)
5318 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5319 if (ap->rxconfig & ANEG_CFG_RF2)
5320 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5321 if (ap->rxconfig & ANEG_CFG_NP)
5322 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5324 ap->link_time = ap->cur_time;
5326 ap->flags ^= (MR_TOGGLE_TX);
5327 if (ap->rxconfig & 0x0008)
5328 ap->flags |= MR_TOGGLE_RX;
5329 if (ap->rxconfig & ANEG_CFG_NP)
5330 ap->flags |= MR_NP_RX;
5331 ap->flags |= MR_PAGE_RX;
5333 ap->state = ANEG_STATE_COMPLETE_ACK;
5334 ret = ANEG_TIMER_ENAB;
5337 case ANEG_STATE_COMPLETE_ACK:
5338 if (ap->ability_match != 0 &&
5339 ap->rxconfig == 0) {
5340 ap->state = ANEG_STATE_AN_ENABLE;
5343 delta = ap->cur_time - ap->link_time;
5344 if (delta > ANEG_STATE_SETTLE_TIME) {
5345 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5346 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5348 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5349 !(ap->flags & MR_NP_RX)) {
5350 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5358 case ANEG_STATE_IDLE_DETECT_INIT:
5359 ap->link_time = ap->cur_time;
5360 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5361 tw32_f(MAC_MODE, tp->mac_mode);
5364 ap->state = ANEG_STATE_IDLE_DETECT;
5365 ret = ANEG_TIMER_ENAB;
5368 case ANEG_STATE_IDLE_DETECT:
5369 if (ap->ability_match != 0 &&
5370 ap->rxconfig == 0) {
5371 ap->state = ANEG_STATE_AN_ENABLE;
5374 delta = ap->cur_time - ap->link_time;
5375 if (delta > ANEG_STATE_SETTLE_TIME) {
5376 /* XXX another gem from the Broadcom driver :( */
5377 ap->state = ANEG_STATE_LINK_OK;
5381 case ANEG_STATE_LINK_OK:
5382 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5386 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5387 /* ??? unimplemented */
5390 case ANEG_STATE_NEXT_PAGE_WAIT:
5391 /* ??? unimplemented */
5402 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5405 struct tg3_fiber_aneginfo aninfo;
5406 int status = ANEG_FAILED;
5410 tw32_f(MAC_TX_AUTO_NEG, 0);
5412 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5413 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5416 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5419 memset(&aninfo, 0, sizeof(aninfo));
5420 aninfo.flags |= MR_AN_ENABLE;
5421 aninfo.state = ANEG_STATE_UNKNOWN;
5422 aninfo.cur_time = 0;
5424 while (++tick < 195000) {
5425 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5426 if (status == ANEG_DONE || status == ANEG_FAILED)
5432 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5433 tw32_f(MAC_MODE, tp->mac_mode);
5436 *txflags = aninfo.txconfig;
5437 *rxflags = aninfo.flags;
5439 if (status == ANEG_DONE &&
5440 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5441 MR_LP_ADV_FULL_DUPLEX)))
5447 static void tg3_init_bcm8002(struct tg3 *tp)
5449 u32 mac_status = tr32(MAC_STATUS);
5452 /* Reset when initting the first time or when we have a link. */
5453 if (tg3_flag(tp, INIT_COMPLETE) &&
5454 !(mac_status & MAC_STATUS_PCS_SYNCED))
5457 /* Set PLL lock range. */
5458 tg3_writephy(tp, 0x16, 0x8007);
5461 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5463 /* Wait for reset to complete. */
5464 /* XXX schedule_timeout() ... */
5465 for (i = 0; i < 500; i++)
5468 /* Config mode; select PMA/Ch 1 regs. */
5469 tg3_writephy(tp, 0x10, 0x8411);
5471 /* Enable auto-lock and comdet, select txclk for tx. */
5472 tg3_writephy(tp, 0x11, 0x0a10);
5474 tg3_writephy(tp, 0x18, 0x00a0);
5475 tg3_writephy(tp, 0x16, 0x41ff);
5477 /* Assert and deassert POR. */
5478 tg3_writephy(tp, 0x13, 0x0400);
5480 tg3_writephy(tp, 0x13, 0x0000);
5482 tg3_writephy(tp, 0x11, 0x0a50);
5484 tg3_writephy(tp, 0x11, 0x0a10);
5486 /* Wait for signal to stabilize */
5487 /* XXX schedule_timeout() ... */
5488 for (i = 0; i < 15000; i++)
5491 /* Deselect the channel register so we can read the PHYID later. */
5494 tg3_writephy(tp, 0x10, 0x8011);
5497 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5500 bool current_link_up;
5501 u32 sg_dig_ctrl, sg_dig_status;
5502 u32 serdes_cfg, expected_sg_dig_ctrl;
5503 int workaround, port_a;
5508 current_link_up = false;
5510 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5511 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5513 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5516 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5517 /* preserve bits 20-23 for voltage regulator */
5518 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5521 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5523 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5524 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5526 u32 val = serdes_cfg;
5532 tw32_f(MAC_SERDES_CFG, val);
5535 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5537 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5538 tg3_setup_flow_control(tp, 0, 0);
5539 current_link_up = true;
5544 /* Want auto-negotiation. */
5545 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5547 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548 if (flowctrl & ADVERTISE_1000XPAUSE)
5549 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5550 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5551 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5553 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5554 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5555 tp->serdes_counter &&
5556 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5557 MAC_STATUS_RCVD_CFG)) ==
5558 MAC_STATUS_PCS_SYNCED)) {
5559 tp->serdes_counter--;
5560 current_link_up = true;
5565 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5566 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5568 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5570 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5571 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5572 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5573 MAC_STATUS_SIGNAL_DET)) {
5574 sg_dig_status = tr32(SG_DIG_STATUS);
5575 mac_status = tr32(MAC_STATUS);
5577 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5578 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5579 u32 local_adv = 0, remote_adv = 0;
5581 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5582 local_adv |= ADVERTISE_1000XPAUSE;
5583 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5584 local_adv |= ADVERTISE_1000XPSE_ASYM;
5586 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5587 remote_adv |= LPA_1000XPAUSE;
5588 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5589 remote_adv |= LPA_1000XPAUSE_ASYM;
5591 tp->link_config.rmt_adv =
5592 mii_adv_to_ethtool_adv_x(remote_adv);
5594 tg3_setup_flow_control(tp, local_adv, remote_adv);
5595 current_link_up = true;
5596 tp->serdes_counter = 0;
5597 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5598 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5599 if (tp->serdes_counter)
5600 tp->serdes_counter--;
5603 u32 val = serdes_cfg;
5610 tw32_f(MAC_SERDES_CFG, val);
5613 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5616 /* Link parallel detection - link is up only if
5617 * we have PCS_SYNC and are not receiving
5618 * config code words. */
5619 mac_status = tr32(MAC_STATUS);
5620 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5621 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5622 tg3_setup_flow_control(tp, 0, 0);
5623 current_link_up = true;
5625 TG3_PHYFLG_PARALLEL_DETECT;
5626 tp->serdes_counter =
5627 SERDES_PARALLEL_DET_TIMEOUT;
5629 goto restart_autoneg;
5633 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5634 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5638 return current_link_up;
5641 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5643 bool current_link_up = false;
5645 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5648 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5649 u32 txflags, rxflags;
5652 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5653 u32 local_adv = 0, remote_adv = 0;
5655 if (txflags & ANEG_CFG_PS1)
5656 local_adv |= ADVERTISE_1000XPAUSE;
5657 if (txflags & ANEG_CFG_PS2)
5658 local_adv |= ADVERTISE_1000XPSE_ASYM;
5660 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5661 remote_adv |= LPA_1000XPAUSE;
5662 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5663 remote_adv |= LPA_1000XPAUSE_ASYM;
5665 tp->link_config.rmt_adv =
5666 mii_adv_to_ethtool_adv_x(remote_adv);
5668 tg3_setup_flow_control(tp, local_adv, remote_adv);
5670 current_link_up = true;
5672 for (i = 0; i < 30; i++) {
5675 (MAC_STATUS_SYNC_CHANGED |
5676 MAC_STATUS_CFG_CHANGED));
5678 if ((tr32(MAC_STATUS) &
5679 (MAC_STATUS_SYNC_CHANGED |
5680 MAC_STATUS_CFG_CHANGED)) == 0)
5684 mac_status = tr32(MAC_STATUS);
5685 if (!current_link_up &&
5686 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5687 !(mac_status & MAC_STATUS_RCVD_CFG))
5688 current_link_up = true;
5690 tg3_setup_flow_control(tp, 0, 0);
5692 /* Forcing 1000FD link up. */
5693 current_link_up = true;
5695 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5698 tw32_f(MAC_MODE, tp->mac_mode);
5703 return current_link_up;
5706 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5709 u32 orig_active_speed;
5710 u8 orig_active_duplex;
5712 bool current_link_up;
5715 orig_pause_cfg = tp->link_config.active_flowctrl;
5716 orig_active_speed = tp->link_config.active_speed;
5717 orig_active_duplex = tp->link_config.active_duplex;
5719 if (!tg3_flag(tp, HW_AUTONEG) &&
5721 tg3_flag(tp, INIT_COMPLETE)) {
5722 mac_status = tr32(MAC_STATUS);
5723 mac_status &= (MAC_STATUS_PCS_SYNCED |
5724 MAC_STATUS_SIGNAL_DET |
5725 MAC_STATUS_CFG_CHANGED |
5726 MAC_STATUS_RCVD_CFG);
5727 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5728 MAC_STATUS_SIGNAL_DET)) {
5729 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5730 MAC_STATUS_CFG_CHANGED));
5735 tw32_f(MAC_TX_AUTO_NEG, 0);
5737 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5738 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5739 tw32_f(MAC_MODE, tp->mac_mode);
5742 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5743 tg3_init_bcm8002(tp);
5745 /* Enable link change events even when polling the serdes. */
5746 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5749 tp->link_config.rmt_adv = 0;
5750 mac_status = tr32(MAC_STATUS);
5752 if (tg3_flag(tp, HW_AUTONEG))
5753 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757 tp->napi[0].hw_status->status =
5758 (SD_STATUS_UPDATED |
5759 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761 for (i = 0; i < 100; i++) {
5762 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5763 MAC_STATUS_CFG_CHANGED));
5765 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5766 MAC_STATUS_CFG_CHANGED |
5767 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5771 mac_status = tr32(MAC_STATUS);
5772 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5773 current_link_up = false;
5774 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5775 tp->serdes_counter == 0) {
5776 tw32_f(MAC_MODE, (tp->mac_mode |
5777 MAC_MODE_SEND_CONFIGS));
5779 tw32_f(MAC_MODE, tp->mac_mode);
5783 if (current_link_up) {
5784 tp->link_config.active_speed = SPEED_1000;
5785 tp->link_config.active_duplex = DUPLEX_FULL;
5786 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5787 LED_CTRL_LNKLED_OVERRIDE |
5788 LED_CTRL_1000MBPS_ON));
5790 tp->link_config.active_speed = SPEED_UNKNOWN;
5791 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5792 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5793 LED_CTRL_LNKLED_OVERRIDE |
5794 LED_CTRL_TRAFFIC_OVERRIDE));
5797 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5798 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5799 if (orig_pause_cfg != now_pause_cfg ||
5800 orig_active_speed != tp->link_config.active_speed ||
5801 orig_active_duplex != tp->link_config.active_duplex)
5802 tg3_link_report(tp);
5808 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5812 u32 current_speed = SPEED_UNKNOWN;
5813 u8 current_duplex = DUPLEX_UNKNOWN;
5814 bool current_link_up = false;
5815 u32 local_adv, remote_adv, sgsr;
5817 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5818 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5819 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5820 (sgsr & SERDES_TG3_SGMII_MODE)) {
5825 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5828 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830 current_link_up = true;
5831 if (sgsr & SERDES_TG3_SPEED_1000) {
5832 current_speed = SPEED_1000;
5833 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5834 } else if (sgsr & SERDES_TG3_SPEED_100) {
5835 current_speed = SPEED_100;
5836 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838 current_speed = SPEED_10;
5839 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5842 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5843 current_duplex = DUPLEX_FULL;
5845 current_duplex = DUPLEX_HALF;
5848 tw32_f(MAC_MODE, tp->mac_mode);
5851 tg3_clear_mac_status(tp);
5853 goto fiber_setup_done;
5856 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5857 tw32_f(MAC_MODE, tp->mac_mode);
5860 tg3_clear_mac_status(tp);
5865 tp->link_config.rmt_adv = 0;
5867 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5868 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5870 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5871 bmsr |= BMSR_LSTATUS;
5873 bmsr &= ~BMSR_LSTATUS;
5876 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5879 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5880 /* do nothing, just check for link up at the end */
5881 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5884 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5885 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5886 ADVERTISE_1000XPAUSE |
5887 ADVERTISE_1000XPSE_ASYM |
5890 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5891 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5894 tg3_writephy(tp, MII_ADVERTISE, newadv);
5895 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5896 tg3_writephy(tp, MII_BMCR, bmcr);
5898 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5899 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5900 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5907 bmcr &= ~BMCR_SPEED1000;
5908 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910 if (tp->link_config.duplex == DUPLEX_FULL)
5911 new_bmcr |= BMCR_FULLDPLX;
5913 if (new_bmcr != bmcr) {
5914 /* BMCR_SPEED1000 is a reserved bit that needs
5915 * to be set on write. */
5917 new_bmcr |= BMCR_SPEED1000;
5919 /* Force a linkdown */
5923 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5924 adv &= ~(ADVERTISE_1000XFULL |
5925 ADVERTISE_1000XHALF |
5927 tg3_writephy(tp, MII_ADVERTISE, adv);
5928 tg3_writephy(tp, MII_BMCR, bmcr |
5932 tg3_carrier_off(tp);
5934 tg3_writephy(tp, MII_BMCR, new_bmcr);
5936 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5937 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5939 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5940 bmsr |= BMSR_LSTATUS;
5942 bmsr &= ~BMSR_LSTATUS;
5944 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5948 if (bmsr & BMSR_LSTATUS) {
5949 current_speed = SPEED_1000;
5950 current_link_up = true;
5951 if (bmcr & BMCR_FULLDPLX)
5952 current_duplex = DUPLEX_FULL;
5954 current_duplex = DUPLEX_HALF;
5959 if (bmcr & BMCR_ANENABLE) {
5962 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5963 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5964 common = local_adv & remote_adv;
5965 if (common & (ADVERTISE_1000XHALF |
5966 ADVERTISE_1000XFULL)) {
5967 if (common & ADVERTISE_1000XFULL)
5968 current_duplex = DUPLEX_FULL;
5970 current_duplex = DUPLEX_HALF;
5972 tp->link_config.rmt_adv =
5973 mii_adv_to_ethtool_adv_x(remote_adv);
5974 } else if (!tg3_flag(tp, 5780_CLASS)) {
5975 /* Link is up via parallel detect */
5977 current_link_up = false;
5983 if (current_link_up && current_duplex == DUPLEX_FULL)
5984 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5987 if (tp->link_config.active_duplex == DUPLEX_HALF)
5988 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990 tw32_f(MAC_MODE, tp->mac_mode);
5993 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995 tp->link_config.active_speed = current_speed;
5996 tp->link_config.active_duplex = current_duplex;
5998 tg3_test_and_report_link_chg(tp, current_link_up);
6002 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 if (tp->serdes_counter) {
6005 /* Give autoneg time to complete. */
6006 tp->serdes_counter--;
6011 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6014 tg3_readphy(tp, MII_BMCR, &bmcr);
6015 if (bmcr & BMCR_ANENABLE) {
6018 /* Select shadow register 0x1f */
6019 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6020 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022 /* Select expansion interrupt status register */
6023 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6024 MII_TG3_DSP_EXP1_INT_STAT);
6025 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6026 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6029 /* We have signal detect and not receiving
6030 * config code words, link is up by parallel detection. */
6034 bmcr &= ~BMCR_ANENABLE;
6035 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6036 tg3_writephy(tp, MII_BMCR, bmcr);
6037 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6040 } else if (tp->link_up &&
6041 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6042 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6045 /* Select expansion interrupt status register */
6046 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6047 MII_TG3_DSP_EXP1_INT_STAT);
6048 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6052 /* Config code words received, turn on autoneg. */
6053 tg3_readphy(tp, MII_BMCR, &bmcr);
6054 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6062 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6067 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6068 err = tg3_setup_fiber_phy(tp, force_reset);
6069 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6070 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072 err = tg3_setup_copper_phy(tp, force_reset);
6074 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6077 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6078 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6085 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6086 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6087 tw32(GRC_MISC_CFG, val);
6090 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6091 (6 << TX_LENGTHS_IPG_SHIFT);
6092 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6093 tg3_asic_rev(tp) == ASIC_REV_5762)
6094 val |= tr32(MAC_TX_LENGTHS) &
6095 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6096 TX_LENGTHS_CNT_DWN_VAL_MSK);
6098 if (tp->link_config.active_speed == SPEED_1000 &&
6099 tp->link_config.active_duplex == DUPLEX_HALF)
6100 tw32(MAC_TX_LENGTHS, val |
6101 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103 tw32(MAC_TX_LENGTHS, val |
6104 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106 if (!tg3_flag(tp, 5705_PLUS)) {
6108 tw32(HOSTCC_STAT_COAL_TICKS,
6109 tp->coal.stats_block_coalesce_usecs);
6111 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6115 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6116 val = tr32(PCIE_PWR_MGMT_THRESH);
6118 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6121 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6122 tw32(PCIE_PWR_MGMT_THRESH, val);
6128 /* tp->lock must be held */
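/* The ptp_read_system_prets()/ptp_read_system_postts() calls below
 * bracket the LSB read so that .gettimex64 users (the
 * PTP_SYS_OFFSET_EXTENDED ioctl path) get system timestamps taken as
 * tightly as possible around the hardware clock sample.
 */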
6129 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6133 ptp_read_system_prets(sts);
6134 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6135 ptp_read_system_postts(sts);
6136 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6141 /* tp->lock must be held */
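/* The reference clock is stopped around the two 32-bit writes so that
 * the 64-bit value is loaded atomically, then resumed.
 */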
6142 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6147 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6148 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6149 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6152 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6153 static inline void tg3_full_unlock(struct tg3 *tp);
6154 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 struct tg3 *tp = netdev_priv(dev);
6158 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6159 SOF_TIMESTAMPING_RX_SOFTWARE |
6160 SOF_TIMESTAMPING_SOFTWARE;
6162 if (tg3_flag(tp, PTP_CAPABLE)) {
6163 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6164 SOF_TIMESTAMPING_RX_HARDWARE |
6165 SOF_TIMESTAMPING_RAW_HARDWARE;
6169 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171 info->phc_index = -1;
6173 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6176 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6177 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6178 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6182 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6184 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6185 bool neg_adj = false;
6193 /* Frequency adjustment is performed using hardware with a 24-bit
6194 * accumulator and a programmable correction value. On each clk, the
6195 * correction value gets added to the accumulator and when it
6196 * overflows, the time counter is incremented/decremented.
6198 * So the conversion from ppb to correction value is
6199 * ppb * (1 << 24) / 1000000000. */
6201 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6202 TG3_EAV_REF_CLK_CORRECT_MASK;
6204 tg3_full_lock(tp, 0);
6207 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6208 TG3_EAV_REF_CLK_CORRECT_EN |
6209 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6211 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6213 tg3_full_unlock(tp);
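/* Worked example (illustrative numbers only): ppb = 1000 gives
 * correction = 1000 * (1 << 24) / 1000000000 = 16 (truncated), so the
 * accumulator overflows an extra 16 / 2^24 of the time, i.e. ~954 ppb.
 * The 24-bit value quantizes adjustments to steps of
 * 10^9 / 2^24 ~= 59.6 ppb.
 */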
6218 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6220 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222 tg3_full_lock(tp, 0);
6223 tp->ptp_adjust += delta;
6224 tg3_full_unlock(tp);
6229 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6230 struct ptp_system_timestamp *sts)
6233 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6235 tg3_full_lock(tp, 0);
6236 ns = tg3_refclk_read(tp, sts);
6237 ns += tp->ptp_adjust;
6238 tg3_full_unlock(tp);
6240 *ts = ns_to_timespec64(ns);
6245 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6246 const struct timespec64 *ts)
6249 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6251 ns = timespec64_to_ns(ts);
6253 tg3_full_lock(tp, 0);
6254 tg3_refclk_write(tp, ns);
6256 tg3_full_unlock(tp);
6261 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6262 struct ptp_clock_request *rq, int on)
6264 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6269 case PTP_CLK_REQ_PEROUT:
6270 /* Reject requests with unsupported flags */
6271 if (rq->perout.flags)
6274 if (rq->perout.index != 0)
6277 tg3_full_lock(tp, 0);
6278 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6279 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6284 nsec = rq->perout.start.sec * 1000000000ULL +
6285 rq->perout.start.nsec;
6287 if (rq->perout.period.sec || rq->perout.period.nsec) {
6288 netdev_warn(tp->dev,
6289 "Device supports only a one-shot timesync output, period must be 0\n");
6294 if (nsec & (1ULL << 63)) {
6295 netdev_warn(tp->dev,
6296 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6301 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6302 tw32(TG3_EAV_WATCHDOG0_MSB,
6303 TG3_EAV_WATCHDOG0_EN |
6304 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6306 tw32(TG3_EAV_REF_CLCK_CTL,
6307 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6309 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6310 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6314 tg3_full_unlock(tp);
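/* Usage sketch (hypothetical caller, not part of the driver): arm a
 * single one-shot pulse at t = 5 s on the hardware clock. The period
 * must be zero because the device only supports one-shot output, as
 * checked above.
 */
static int __maybe_unused tg3_ptp_oneshot_example(struct ptp_clock_info *ptp)
{
	struct ptp_clock_request rq = {
		.type = PTP_CLK_REQ_PEROUT,
		.perout = {
			.index = 0,
			.flags = 0,
			.start = { .sec = 5, .nsec = 0 },
			.period = { .sec = 0, .nsec = 0 },
		},
	};

	/* on = 1 arms the watchdog; on = 0 would disarm it */
	return tg3_ptp_enable(ptp, &rq, 1);
}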
6324 static const struct ptp_clock_info tg3_ptp_caps = {
6325 .owner = THIS_MODULE,
6326 .name = "tg3 clock",
6327 .max_adj = 250000000,
6333 .adjfreq = tg3_ptp_adjfreq,
6334 .adjtime = tg3_ptp_adjtime,
6335 .gettimex64 = tg3_ptp_gettimex,
6336 .settime64 = tg3_ptp_settime,
6337 .enable = tg3_ptp_enable,
6340 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6341 struct skb_shared_hwtstamps *timestamp)
6343 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6344 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6348 /* tp->lock must be held */
6349 static void tg3_ptp_init(struct tg3 *tp)
6351 if (!tg3_flag(tp, PTP_CAPABLE))
6354 /* Initialize the hardware clock to the system time. */
6355 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6357 tp->ptp_info = tg3_ptp_caps;
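/* Note: the clock itself is registered separately via
 * ptp_clock_register(), which populates tp->ptp_clock; its index is
 * what tg3_get_ts_info() reports back as phc_index.
 */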
6360 /* tp->lock must be held */
6361 static void tg3_ptp_resume(struct tg3 *tp)
6363 if (!tg3_flag(tp, PTP_CAPABLE))
6366 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6370 static void tg3_ptp_fini(struct tg3 *tp)
6372 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6375 ptp_clock_unregister(tp->ptp_clock);
6376 tp->ptp_clock = NULL;
6380 static inline int tg3_irq_sync(struct tg3 *tp)
6382 return tp->irq_sync;
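/* Copy 'len' bytes of registers starting at 'off' into the dump
 * buffer. dst is first advanced by 'off' bytes, so each register lands
 * at the same offset in the buffer as in the device's register space,
 * letting tg3_dump_state() print address-aligned rows.
 */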
6385 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6389 dst = (u32 *)((u8 *)dst + off);
6390 for (i = 0; i < len; i += sizeof(u32))
6391 *dst++ = tr32(off + i);
6394 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6396 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6397 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6398 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6399 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6400 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6401 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6402 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6403 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6404 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6405 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6406 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6407 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6408 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6409 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6410 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6411 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6412 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6413 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6414 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6416 if (tg3_flag(tp, SUPPORT_MSIX))
6417 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6419 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6420 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6421 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6422 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6423 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6424 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6425 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6426 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6428 if (!tg3_flag(tp, 5705_PLUS)) {
6429 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6430 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6431 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6434 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6435 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6436 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6437 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6438 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6440 if (tg3_flag(tp, NVRAM))
6441 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6444 static void tg3_dump_state(struct tg3 *tp)
6449 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6453 if (tg3_flag(tp, PCI_EXPRESS)) {
6454 /* Read up to but not including private PCI registers */
6455 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6456 regs[i / sizeof(u32)] = tr32(i);
6458 tg3_dump_legacy_regs(tp, regs);
6460 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6461 if (!regs[i + 0] && !regs[i + 1] &&
6462 !regs[i + 2] && !regs[i + 3])
6465 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6467 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6472 for (i = 0; i < tp->irq_cnt; i++) {
6473 struct tg3_napi *tnapi = &tp->napi[i];
6475 /* SW status block */
6477 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6479 tnapi->hw_status->status,
6480 tnapi->hw_status->status_tag,
6481 tnapi->hw_status->rx_jumbo_consumer,
6482 tnapi->hw_status->rx_consumer,
6483 tnapi->hw_status->rx_mini_consumer,
6484 tnapi->hw_status->idx[0].rx_producer,
6485 tnapi->hw_status->idx[0].tx_consumer);
6488 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6490 tnapi->last_tag, tnapi->last_irq_tag,
6491 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6493 tnapi->prodring.rx_std_prod_idx,
6494 tnapi->prodring.rx_std_cons_idx,
6495 tnapi->prodring.rx_jmb_prod_idx,
6496 tnapi->prodring.rx_jmb_cons_idx);
6500 /* This is called whenever we suspect that the system chipset is re-
6501 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6502 * is bogus tx completions. We try to recover by setting the
6503 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later. */
6506 static void tg3_tx_recover(struct tg3 *tp)
6508 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6509 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6511 netdev_warn(tp->dev,
6512 "The system may be re-ordering memory-mapped I/O "
6513 "cycles to the network device, attempting to recover. "
6514 "Please report the problem to the driver maintainer "
6515 "and include system chipset information.\n");
6517 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6520 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6522 /* Tell compiler to fetch tx indices from memory. */
6524 return tnapi->tx_pending -
6525 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
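/* Worked example (illustrative numbers): with TG3_TX_RING_SIZE = 512,
 * tx_prod = 2 and tx_cons = 510, the unsigned subtraction wraps and
 * (2 - 510) & 511 = 4 descriptors are in flight, so the result is
 * tx_pending - 4. The mask keeps the count correct across wraparound
 * because the ring size is a power of two.
 */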
6528 /* Tigon3 never reports partial packet sends. So we do not
6529 * need special logic to handle SKBs that have not had all
6530 * of their frags sent yet, like SunGEM does. */
6532 static void tg3_tx(struct tg3_napi *tnapi)
6534 struct tg3 *tp = tnapi->tp;
6535 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6536 u32 sw_idx = tnapi->tx_cons;
6537 struct netdev_queue *txq;
6538 int index = tnapi - tp->napi;
6539 unsigned int pkts_compl = 0, bytes_compl = 0;
6541 if (tg3_flag(tp, ENABLE_TSS))
6544 txq = netdev_get_tx_queue(tp->dev, index);
6546 while (sw_idx != hw_idx) {
6547 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6548 struct sk_buff *skb = ri->skb;
6551 if (unlikely(skb == NULL)) {
6556 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6557 struct skb_shared_hwtstamps timestamp;
6558 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6559 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6561 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6563 skb_tstamp_tx(skb, &timestamp);
6566 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6567 skb_headlen(skb), DMA_TO_DEVICE);
6571 while (ri->fragmented) {
6572 ri->fragmented = false;
6573 sw_idx = NEXT_TX(sw_idx);
6574 ri = &tnapi->tx_buffers[sw_idx];
6577 sw_idx = NEXT_TX(sw_idx);
6579 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6580 ri = &tnapi->tx_buffers[sw_idx];
6581 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6584 dma_unmap_page(&tp->pdev->dev,
6585 dma_unmap_addr(ri, mapping),
6586 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6589 while (ri->fragmented) {
6590 ri->fragmented = false;
6591 sw_idx = NEXT_TX(sw_idx);
6592 ri = &tnapi->tx_buffers[sw_idx];
6595 sw_idx = NEXT_TX(sw_idx);
6599 bytes_compl += skb->len;
6601 dev_consume_skb_any(skb);
6603 if (unlikely(tx_bug)) {
6609 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6611 tnapi->tx_cons = sw_idx;
6613 /* Need to make the tx_cons update visible to tg3_start_xmit()
6614 * before checking for netif_queue_stopped(). Without the
6615 * memory barrier, there is a small possibility that tg3_start_xmit()
6616 * will miss it and cause the queue to be stopped forever. */
6620 if (unlikely(netif_tx_queue_stopped(txq) &&
6621 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6622 __netif_tx_lock(txq, smp_processor_id());
6623 if (netif_tx_queue_stopped(txq) &&
6624 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6625 netif_tx_wake_queue(txq);
6626 __netif_tx_unlock(txq);
6630 static void tg3_frag_free(bool is_frag, void *data)
6633 skb_free_frag(data);
6638 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6640 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6641 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6646 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6648 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6653 /* Returns size of skb allocated or < 0 on error.
6655 * We only need to fill in the address because the other members
6656 * of the RX descriptor are invariant, see tg3_init_rings.
6658 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6659 * posting buffers we only dirty the first cache line of the RX
6660 * descriptor (containing the address). Whereas for the RX status
6661 * buffers the cpu only reads the last cacheline of the RX descriptor
6662 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). */
6664 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6665 u32 opaque_key, u32 dest_idx_unmasked,
6666 unsigned int *frag_size)
6668 struct tg3_rx_buffer_desc *desc;
6669 struct ring_info *map;
6672 int skb_size, data_size, dest_idx;
6674 switch (opaque_key) {
6675 case RXD_OPAQUE_RING_STD:
6676 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6677 desc = &tpr->rx_std[dest_idx];
6678 map = &tpr->rx_std_buffers[dest_idx];
6679 data_size = tp->rx_pkt_map_sz;
6682 case RXD_OPAQUE_RING_JUMBO:
6683 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6684 desc = &tpr->rx_jmb[dest_idx].std;
6685 map = &tpr->rx_jmb_buffers[dest_idx];
6686 data_size = TG3_RX_JMB_MAP_SZ;
6693 /* Do not overwrite any of the map or rp information
6694 * until we are sure we can commit to a new buffer.
6696 * Callers depend upon this behavior and assume that
6697 * we leave everything unchanged if we fail. */
6699 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6700 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6701 if (skb_size <= PAGE_SIZE) {
6702 data = napi_alloc_frag(skb_size);
6703 *frag_size = skb_size;
6705 data = kmalloc(skb_size, GFP_ATOMIC);
6711 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6712 data_size, DMA_FROM_DEVICE);
6713 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6714 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6719 dma_unmap_addr_set(map, mapping, mapping);
6721 desc->addr_hi = ((u64)mapping >> 32);
6722 desc->addr_lo = ((u64)mapping & 0xffffffff);
6727 /* We only need to move over in the address because the other
6728 * members of the RX descriptor are invariant. See notes above
6729 * tg3_alloc_rx_data for full details. */
6731 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6732 struct tg3_rx_prodring_set *dpr,
6733 u32 opaque_key, int src_idx,
6734 u32 dest_idx_unmasked)
6736 struct tg3 *tp = tnapi->tp;
6737 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6738 struct ring_info *src_map, *dest_map;
6739 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6742 switch (opaque_key) {
6743 case RXD_OPAQUE_RING_STD:
6744 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6745 dest_desc = &dpr->rx_std[dest_idx];
6746 dest_map = &dpr->rx_std_buffers[dest_idx];
6747 src_desc = &spr->rx_std[src_idx];
6748 src_map = &spr->rx_std_buffers[src_idx];
6751 case RXD_OPAQUE_RING_JUMBO:
6752 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6753 dest_desc = &dpr->rx_jmb[dest_idx].std;
6754 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6755 src_desc = &spr->rx_jmb[src_idx].std;
6756 src_map = &spr->rx_jmb_buffers[src_idx];
6763 dest_map->data = src_map->data;
6764 dma_unmap_addr_set(dest_map, mapping,
6765 dma_unmap_addr(src_map, mapping));
6766 dest_desc->addr_hi = src_desc->addr_hi;
6767 dest_desc->addr_lo = src_desc->addr_lo;
6769 /* Ensure that the update to the skb happens after the physical
6770 * addresses have been transferred to the new BD location. */
6774 src_map->data = NULL;
6777 /* The RX ring scheme is composed of multiple rings which post fresh
6778 * buffers to the chip, and one special ring the chip uses to report
6779 * status back to the host.
6781 * The special ring reports the status of received packets to the
6782 * host. The chip does not write into the original descriptor the
6783 * RX buffer was obtained from. The chip simply takes the original
6784 * descriptor as provided by the host, updates the status and length
6785 * field, then writes this into the next status ring entry.
6787 * Each ring the host uses to post buffers to the chip is described
6788 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6789 * it is first placed into the on-chip ram. When the packet's length
6790 * is known, it walks down the TG3_BDINFO entries to select the ring.
6791 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6792 * which is within the range of the new packet's length is chosen.
6794 * The "separate ring for rx status" scheme may sound queer, but it makes
6795 * sense from a cache coherency perspective. If only the host writes
6796 * to the buffer post rings, and only the chip writes to the rx status
6797 * rings, then cache lines never move beyond shared-modified state.
6798 * If both the host and chip were to write into the same ring, cache line
6799 * eviction could occur since both entities want it in an exclusive state. */
6801 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6803 struct tg3 *tp = tnapi->tp;
6804 u32 work_mask, rx_std_posted = 0;
6805 u32 std_prod_idx, jmb_prod_idx;
6806 u32 sw_idx = tnapi->rx_rcb_ptr;
6809 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6811 hw_idx = *(tnapi->rx_rcb_prod_idx);
6813 /* We need to order the read of hw_idx and the read of
6814 * the opaque cookie. */
6819 std_prod_idx = tpr->rx_std_prod_idx;
6820 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6821 while (sw_idx != hw_idx && budget > 0) {
6822 struct ring_info *ri;
6823 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6825 struct sk_buff *skb;
6826 dma_addr_t dma_addr;
6827 u32 opaque_key, desc_idx, *post_ptr;
6831 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6832 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6833 if (opaque_key == RXD_OPAQUE_RING_STD) {
6834 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6835 dma_addr = dma_unmap_addr(ri, mapping);
6837 post_ptr = &std_prod_idx;
6839 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6840 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6841 dma_addr = dma_unmap_addr(ri, mapping);
6843 post_ptr = &jmb_prod_idx;
6845 goto next_pkt_nopost;
6847 work_mask |= opaque_key;
6849 if (desc->err_vlan & RXD_ERR_MASK) {
6851 tg3_recycle_rx(tnapi, tpr, opaque_key,
6852 desc_idx, *post_ptr);
6854 /* Other statistics kept track of by card. */
6859 prefetch(data + TG3_RX_OFFSET(tp));
6860 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6863 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6864 RXD_FLAG_PTPSTAT_PTPV1 ||
6865 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6866 RXD_FLAG_PTPSTAT_PTPV2) {
6867 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6868 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6871 if (len > TG3_RX_COPY_THRESH(tp)) {
6873 unsigned int frag_size;
6875 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6876 *post_ptr, &frag_size);
6880 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6883 /* Ensure that the update to the data happens
6884 * after the usage of the old DMA mapping.
6890 skb = build_skb(data, frag_size);
6892 tg3_frag_free(frag_size != 0, data);
6893 goto drop_it_no_recycle;
6895 skb_reserve(skb, TG3_RX_OFFSET(tp));
6897 tg3_recycle_rx(tnapi, tpr, opaque_key,
6898 desc_idx, *post_ptr);
6900 skb = netdev_alloc_skb(tp->dev,
6901 len + TG3_RAW_IP_ALIGN);
6903 goto drop_it_no_recycle;
6905 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6906 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6909 data + TG3_RX_OFFSET(tp),
6911 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6912 len, DMA_FROM_DEVICE);
6917 tg3_hwclock_to_timestamp(tp, tstamp,
6918 skb_hwtstamps(skb));
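/* A hardware-computed TCP/UDP checksum that folds to 0xffff means the
 * packet checksummed correctly, so tell the stack not to verify it
 * again.
 */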
6920 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6921 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6922 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6923 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6924 skb->ip_summed = CHECKSUM_UNNECESSARY;
6926 skb_checksum_none_assert(skb);
6928 skb->protocol = eth_type_trans(skb, tp->dev);
6930 if (len > (tp->dev->mtu + ETH_HLEN) &&
6931 skb->protocol != htons(ETH_P_8021Q) &&
6932 skb->protocol != htons(ETH_P_8021AD)) {
6933 dev_kfree_skb_any(skb);
6934 goto drop_it_no_recycle;
6937 if (desc->type_flags & RXD_FLAG_VLAN &&
6938 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6939 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6940 desc->err_vlan & RXD_VLAN_MASK);
6942 napi_gro_receive(&tnapi->napi, skb);
6950 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6951 tpr->rx_std_prod_idx = std_prod_idx &
6952 tp->rx_std_ring_mask;
6953 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6954 tpr->rx_std_prod_idx);
6955 work_mask &= ~RXD_OPAQUE_RING_STD;
6960 sw_idx &= tp->rx_ret_ring_mask;
6962 /* Refresh hw_idx to see if there is new work */
6963 if (sw_idx == hw_idx) {
6964 hw_idx = *(tnapi->rx_rcb_prod_idx);
6969 /* ACK the status ring. */
6970 tnapi->rx_rcb_ptr = sw_idx;
6971 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6973 /* Refill RX ring(s). */
6974 if (!tg3_flag(tp, ENABLE_RSS)) {
6975 /* Sync BD data before updating mailbox */
6978 if (work_mask & RXD_OPAQUE_RING_STD) {
6979 tpr->rx_std_prod_idx = std_prod_idx &
6980 tp->rx_std_ring_mask;
6981 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6982 tpr->rx_std_prod_idx);
6984 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6985 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6986 tp->rx_jmb_ring_mask;
6987 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6988 tpr->rx_jmb_prod_idx);
6990 } else if (work_mask) {
6991 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6992 * updated before the producer indices can be updated. */
6996 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6997 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6999 if (tnapi != &tp->napi[1]) {
7000 tp->rx_refill = true;
7001 napi_schedule(&tp->napi[1].napi);
7008 static void tg3_poll_link(struct tg3 *tp)
7010 /* handle link change and other phy events */
7011 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7012 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7014 if (sblk->status & SD_STATUS_LINK_CHG) {
7015 sblk->status = SD_STATUS_UPDATED |
7016 (sblk->status & ~SD_STATUS_LINK_CHG);
7017 spin_lock(&tp->lock);
7018 if (tg3_flag(tp, USE_PHYLIB)) {
7020 (MAC_STATUS_SYNC_CHANGED |
7021 MAC_STATUS_CFG_CHANGED |
7022 MAC_STATUS_MI_COMPLETION |
7023 MAC_STATUS_LNKSTATE_CHANGED));
7026 tg3_setup_phy(tp, false);
7027 spin_unlock(&tp->lock);
7032 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7033 struct tg3_rx_prodring_set *dpr,
7034 struct tg3_rx_prodring_set *spr)
7036 u32 si, di, cpycnt, src_prod_idx;
7040 src_prod_idx = spr->rx_std_prod_idx;
7042 /* Make sure updates to the rx_std_buffers[] entries and the
7043 * standard producer index are seen in the correct order. */
7047 if (spr->rx_std_cons_idx == src_prod_idx)
7050 if (spr->rx_std_cons_idx < src_prod_idx)
7051 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7053 cpycnt = tp->rx_std_ring_mask + 1 -
7054 spr->rx_std_cons_idx;
7056 cpycnt = min(cpycnt,
7057 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7059 si = spr->rx_std_cons_idx;
7060 di = dpr->rx_std_prod_idx;
7062 for (i = di; i < di + cpycnt; i++) {
7063 if (dpr->rx_std_buffers[i].data) {
7073 /* Ensure that updates to the rx_std_buffers ring and the
7074 * shadowed hardware producer ring from tg3_recycle_skb() are
7075 * ordered correctly WRT the skb check above. */
7079 memcpy(&dpr->rx_std_buffers[di],
7080 &spr->rx_std_buffers[si],
7081 cpycnt * sizeof(struct ring_info));
7083 for (i = 0; i < cpycnt; i++, di++, si++) {
7084 struct tg3_rx_buffer_desc *sbd, *dbd;
7085 sbd = &spr->rx_std[si];
7086 dbd = &dpr->rx_std[di];
7087 dbd->addr_hi = sbd->addr_hi;
7088 dbd->addr_lo = sbd->addr_lo;
7091 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7092 tp->rx_std_ring_mask;
7093 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7094 tp->rx_std_ring_mask;
7098 src_prod_idx = spr->rx_jmb_prod_idx;
7100 /* Make sure updates to the rx_jmb_buffers[] entries and
7101 * the jumbo producer index are seen in the correct order. */
7105 if (spr->rx_jmb_cons_idx == src_prod_idx)
7108 if (spr->rx_jmb_cons_idx < src_prod_idx)
7109 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7111 cpycnt = tp->rx_jmb_ring_mask + 1 -
7112 spr->rx_jmb_cons_idx;
7114 cpycnt = min(cpycnt,
7115 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7117 si = spr->rx_jmb_cons_idx;
7118 di = dpr->rx_jmb_prod_idx;
7120 for (i = di; i < di + cpycnt; i++) {
7121 if (dpr->rx_jmb_buffers[i].data) {
7131 /* Ensure that updates to the rx_jmb_buffers ring and the
7132 * shadowed hardware producer ring from tg3_recycle_skb() are
7133 * ordered correctly WRT the skb check above. */
7137 memcpy(&dpr->rx_jmb_buffers[di],
7138 &spr->rx_jmb_buffers[si],
7139 cpycnt * sizeof(struct ring_info));
7141 for (i = 0; i < cpycnt; i++, di++, si++) {
7142 struct tg3_rx_buffer_desc *sbd, *dbd;
7143 sbd = &spr->rx_jmb[si].std;
7144 dbd = &dpr->rx_jmb[di].std;
7145 dbd->addr_hi = sbd->addr_hi;
7146 dbd->addr_lo = sbd->addr_lo;
7149 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7150 tp->rx_jmb_ring_mask;
7151 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7152 tp->rx_jmb_ring_mask;
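/* Illustrative sketch (editor's example, not driver code): the
 * contiguous-run computation tg3_rx_prodring_xfer() above uses for a
 * power-of-two ring.  Given consumer/producer indices and the ring
 * mask, it returns how many entries a single memcpy() can move before
 * the ring wraps; the caller also clamps against the destination's
 * contiguous space and then advances both indices modulo the mask.
 */
static u32 __maybe_unused tg3_example_ring_contig(u32 cons, u32 prod, u32 mask)
{
	u32 cnt;

	if (cons == prod)
		return 0;		/* nothing to transfer */

	if (cons < prod)
		cnt = prod - cons;	/* no wrap: one straight run */
	else
		cnt = mask + 1 - cons;	/* wraps: run up to the ring end */

	return cnt;
}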
7158 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7160 struct tg3 *tp = tnapi->tp;
7162 /* run TX completion thread */
7163 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7165 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7169 if (!tnapi->rx_rcb_prod_idx)
7172 /* run RX thread, within the bounds set by NAPI.
7173 * All RX "locking" is done by ensuring outside
7174 * code synchronizes with tg3->napi.poll()
7176 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7177 work_done += tg3_rx(tnapi, budget - work_done);
7179 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7180 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7182 u32 std_prod_idx = dpr->rx_std_prod_idx;
7183 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7185 tp->rx_refill = false;
7186 for (i = 1; i <= tp->rxq_cnt; i++)
7187 err |= tg3_rx_prodring_xfer(tp, dpr,
7188 &tp->napi[i].prodring);
7192 if (std_prod_idx != dpr->rx_std_prod_idx)
7193 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7194 dpr->rx_std_prod_idx);
7196 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7197 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7198 dpr->rx_jmb_prod_idx);
7201 tw32_f(HOSTCC_MODE, tp->coal_now);
7207 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7209 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7210 schedule_work(&tp->reset_task);
7213 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7215 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7216 cancel_work_sync(&tp->reset_task);
7217 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
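/* Illustrative sketch (editor's example, not driver code): the
 * schedule-at-most-once idiom used by tg3_reset_task_schedule() above.
 * test_and_set_bit() atomically sets the bit and returns its old
 * value, so of several concurrent callers exactly one queues the work;
 * the flag is cleared again once re-scheduling is safe.  The struct
 * and bit name here are hypothetical.
 */
struct tg3_example_once {
	unsigned long flags;
#define TG3_EXAMPLE_PENDING_BIT	0
	struct work_struct work;
};

static void __maybe_unused tg3_example_schedule_once(struct tg3_example_once *e)
{
	if (!test_and_set_bit(TG3_EXAMPLE_PENDING_BIT, &e->flags))
		schedule_work(&e->work);
}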
7220 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7222 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7223 struct tg3 *tp = tnapi->tp;
7225 struct tg3_hw_status *sblk = tnapi->hw_status;
7228 work_done = tg3_poll_work(tnapi, work_done, budget);
7230 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7233 if (unlikely(work_done >= budget))
7236 /* tp->last_tag is used in tg3_int_reenable() below
7237 * to tell the hw how much work has been processed,
7238 * so we must read it before checking for more work.
7240 tnapi->last_tag = sblk->status_tag;
7241 tnapi->last_irq_tag = tnapi->last_tag;
7244 /* check for RX/TX work to do */
7245 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7246 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7248 /* This test is not race-free, but looping again
7249 * reduces the number of interrupts.
7250 */
7251 if (tnapi == &tp->napi[1] && tp->rx_refill)
7254 napi_complete_done(napi, work_done);
7255 /* Reenable interrupts. */
7256 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7258 /* This test here is synchronized by napi_schedule()
7259 * and napi_complete() to close the race condition.
7261 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7262 tw32(HOSTCC_MODE, tp->coalesce_mode |
7263 HOSTCC_MODE_ENABLE |
7270 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7274 /* work_done is guaranteed to be less than budget. */
7275 napi_complete(napi);
7276 tg3_reset_task_schedule(tp);
7280 static void tg3_process_error(struct tg3 *tp)
7283 bool real_error = false;
7285 if (tg3_flag(tp, ERROR_PROCESSED))
7288 /* Check Flow Attention register */
7289 val = tr32(HOSTCC_FLOW_ATTN);
7290 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7291 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7295 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7296 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7300 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7301 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7310 tg3_flag_set(tp, ERROR_PROCESSED);
7311 tg3_reset_task_schedule(tp);
7314 static int tg3_poll(struct napi_struct *napi, int budget)
7316 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7317 struct tg3 *tp = tnapi->tp;
7319 struct tg3_hw_status *sblk = tnapi->hw_status;
7322 if (sblk->status & SD_STATUS_ERROR)
7323 tg3_process_error(tp);
7327 work_done = tg3_poll_work(tnapi, work_done, budget);
7329 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7332 if (unlikely(work_done >= budget))
7335 if (tg3_flag(tp, TAGGED_STATUS)) {
7336 /* tp->last_tag is used in tg3_int_reenable() below
7337 * to tell the hw how much work has been processed,
7338 * so we must read it before checking for more work.
7340 tnapi->last_tag = sblk->status_tag;
7341 tnapi->last_irq_tag = tnapi->last_tag;
7344 sblk->status &= ~SD_STATUS_UPDATED;
7346 if (likely(!tg3_has_work(tnapi))) {
7347 napi_complete_done(napi, work_done);
7348 tg3_int_reenable(tnapi);
7353 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7357 /* work_done is guaranteed to be less than budget. */
7358 napi_complete(napi);
7359 tg3_reset_task_schedule(tp);
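/* Illustrative sketch (editor's example, not driver code): the NAPI
 * poll shape that tg3_poll() above follows.  Work is bounded by
 * @budget; only when the budget is not exhausted may the poller call
 * napi_complete_done() and re-arm the device interrupt, so an event
 * that races with completion re-schedules the poll instead of being
 * lost.  The RX/TX clean step is stubbed out here.
 */
static int __maybe_unused tg3_example_napi_poll(struct napi_struct *napi,
						int budget)
{
	int work_done = 0;	/* stands in for the real RX/TX clean loop */

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* only now is it safe to re-enable the device interrupt */
	}

	return work_done;
}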
7363 static void tg3_napi_disable(struct tg3 *tp)
7367 for (i = tp->irq_cnt - 1; i >= 0; i--)
7368 napi_disable(&tp->napi[i].napi);
7371 static void tg3_napi_enable(struct tg3 *tp)
7375 for (i = 0; i < tp->irq_cnt; i++)
7376 napi_enable(&tp->napi[i].napi);
7379 static void tg3_napi_init(struct tg3 *tp)
7383 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7384 for (i = 1; i < tp->irq_cnt; i++)
7385 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7388 static void tg3_napi_fini(struct tg3 *tp)
7392 for (i = 0; i < tp->irq_cnt; i++)
7393 netif_napi_del(&tp->napi[i].napi);
7396 static inline void tg3_netif_stop(struct tg3 *tp)
7398 netif_trans_update(tp->dev); /* prevent tx timeout */
7399 tg3_napi_disable(tp);
7400 netif_carrier_off(tp->dev);
7401 netif_tx_disable(tp->dev);
7404 /* tp->lock must be held */
7405 static inline void tg3_netif_start(struct tg3 *tp)
7409 /* NOTE: unconditional netif_tx_wake_all_queues is only
7410 * appropriate so long as all callers are assured to
7411 * have free tx slots (such as after tg3_init_hw)
7413 netif_tx_wake_all_queues(tp->dev);
7416 netif_carrier_on(tp->dev);
7418 tg3_napi_enable(tp);
7419 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7420 tg3_enable_ints(tp);
7423 static void tg3_irq_quiesce(struct tg3 *tp)
7424 __releases(tp->lock)
7425 __acquires(tp->lock)
7429 BUG_ON(tp->irq_sync);
7434 spin_unlock_bh(&tp->lock);
7436 for (i = 0; i < tp->irq_cnt; i++)
7437 synchronize_irq(tp->napi[i].irq_vec);
7439 spin_lock_bh(&tp->lock);
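/* Illustrative sketch (editor's example, not driver code): why
 * tg3_irq_quiesce() above drops tp->lock around the synchronize_irq()
 * loop.  synchronize_irq() can block until every in-flight hard IRQ
 * handler has returned, and blocking while holding a BH-disabling
 * spinlock is forbidden, so the lock is released first and re-taken
 * afterwards while irq_sync keeps the handlers from doing new work.
 */
static void __maybe_unused tg3_example_quiesce(spinlock_t *lock,
					       unsigned int irq)
{
	spin_unlock_bh(lock);

	/* safe to block here: no spinlock held */
	synchronize_irq(irq);

	spin_lock_bh(lock);
}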
7442 /* Fully shut down all tg3 driver activity elsewhere in the system.
7443 * If irq_sync is non-zero, then the IRQ handlers must be
7444 * synchronized as well. Most of the time this is not necessary,
7445 * except when shutting down the device.
7446 */
7447 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7449 spin_lock_bh(&tp->lock);
7451 tg3_irq_quiesce(tp);
7454 static inline void tg3_full_unlock(struct tg3 *tp)
7456 spin_unlock_bh(&tp->lock);
7459 /* One-shot MSI handler - Chip automatically disables interrupt
7460 * after sending MSI so driver doesn't have to do it.
7462 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7464 struct tg3_napi *tnapi = dev_id;
7465 struct tg3 *tp = tnapi->tp;
7467 prefetch(tnapi->hw_status);
7469 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7471 if (likely(!tg3_irq_sync(tp)))
7472 napi_schedule(&tnapi->napi);
7477 /* MSI ISR - No need to check for interrupt sharing and no need to
7478 * flush status block and interrupt mailbox. PCI ordering rules
7479 * guarantee that MSI will arrive after the status block.
7481 static irqreturn_t tg3_msi(int irq, void *dev_id)
7483 struct tg3_napi *tnapi = dev_id;
7484 struct tg3 *tp = tnapi->tp;
7486 prefetch(tnapi->hw_status);
7488 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7490 * Writing any value to intr-mbox-0 clears PCI INTA# and
7491 * chip-internal interrupt pending events.
7492 * Writing non-zero to intr-mbox-0 additionally tells the
7493 * NIC to stop sending us irqs, engaging "in-intr-handler"
7494 * event coalescing.
7495 */
7496 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7497 if (likely(!tg3_irq_sync(tp)))
7498 napi_schedule(&tnapi->napi);
7500 return IRQ_RETVAL(1);
7503 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7505 struct tg3_napi *tnapi = dev_id;
7506 struct tg3 *tp = tnapi->tp;
7507 struct tg3_hw_status *sblk = tnapi->hw_status;
7508 unsigned int handled = 1;
7510 /* In INTx mode, it is possible for the interrupt to arrive at
7511 * the CPU before the status block that was posted prior to the
7512 * interrupt. Reading the PCI State register will confirm whether
7513 * the interrupt is ours and will flush the status block.
7514 */
7515 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7516 if (tg3_flag(tp, CHIP_RESETTING) ||
7517 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7524 * Writing any value to intr-mbox-0 clears PCI INTA# and
7525 * chip-internal interrupt pending events.
7526 * Writing non-zero to intr-mbox-0 additionally tells the
7527 * NIC to stop sending us irqs, engaging "in-intr-handler"
7528 * event coalescing.
7529 *
7530 * Flush the mailbox to de-assert the IRQ immediately to prevent
7531 * spurious interrupts. The flush impacts performance but
7532 * excessive spurious interrupts can be worse in some cases.
7533 */
7534 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7535 if (tg3_irq_sync(tp))
7537 sblk->status &= ~SD_STATUS_UPDATED;
7538 if (likely(tg3_has_work(tnapi))) {
7539 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7540 napi_schedule(&tnapi->napi);
7542 /* No work, shared interrupt perhaps? Re-enable
7543 * interrupts, and flush that PCI write.
7544 */
7545 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7549 return IRQ_RETVAL(handled);
7552 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7554 struct tg3_napi *tnapi = dev_id;
7555 struct tg3 *tp = tnapi->tp;
7556 struct tg3_hw_status *sblk = tnapi->hw_status;
7557 unsigned int handled = 1;
7559 /* In INTx mode, it is possible for the interrupt to arrive at
7560 * the CPU before the status block that was posted prior to the
7561 * interrupt. Reading the PCI State register will confirm whether
7562 * the interrupt is ours and will flush the status block.
7563 */
7564 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7565 if (tg3_flag(tp, CHIP_RESETTING) ||
7566 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7573 * Writing any value to intr-mbox-0 clears PCI INTA# and
7574 * chip-internal interrupt pending events.
7575 * Writing non-zero to intr-mbox-0 additionally tells the
7576 * NIC to stop sending us irqs, engaging "in-intr-handler"
7577 * event coalescing.
7578 *
7579 * Flush the mailbox to de-assert the IRQ immediately to prevent
7580 * spurious interrupts. The flush impacts performance but
7581 * excessive spurious interrupts can be worse in some cases.
7582 */
7583 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7586 * In a shared interrupt configuration, sometimes other devices'
7587 * interrupts will scream. We record the current status tag here
7588 * so that the above check can report that the screaming interrupts
7589 * are unhandled. Eventually they will be silenced.
7591 tnapi->last_irq_tag = sblk->status_tag;
7593 if (tg3_irq_sync(tp))
7596 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7598 napi_schedule(&tnapi->napi);
7601 return IRQ_RETVAL(handled);
7604 /* ISR for interrupt test */
7605 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7607 struct tg3_napi *tnapi = dev_id;
7608 struct tg3 *tp = tnapi->tp;
7609 struct tg3_hw_status *sblk = tnapi->hw_status;
7611 if ((sblk->status & SD_STATUS_UPDATED) ||
7612 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7613 tg3_disable_ints(tp);
7614 return IRQ_RETVAL(1);
7616 return IRQ_RETVAL(0);
7619 #ifdef CONFIG_NET_POLL_CONTROLLER
7620 static void tg3_poll_controller(struct net_device *dev)
7623 struct tg3 *tp = netdev_priv(dev);
7625 if (tg3_irq_sync(tp))
7628 for (i = 0; i < tp->irq_cnt; i++)
7629 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7633 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7635 struct tg3 *tp = netdev_priv(dev);
7637 if (netif_msg_tx_err(tp)) {
7638 netdev_err(dev, "transmit timed out, resetting\n");
7642 tg3_reset_task_schedule(tp);
7645 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7646 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7648 u32 base = (u32) mapping & 0xffffffff;
7650 return base + len + 8 < base;
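/* Worked example for the test above: a buffer whose mapping has low
 * 32 bits 0xffffe000 and len 0x2000 straddles the 4 GB line; in u32
 * arithmetic 0xffffe000 + 0x2000 + 8 wraps to 0x00000008, which is
 * < 0xffffe000, so the test returns true.  The same buffer mapped at
 * 0x10000000 sums to 0x10002008 without wrapping and returns false.
 * The +8 margin also flags buffers that end within 8 bytes of a
 * boundary.
 */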
7653 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7654 * of any 4GB boundaries: 4G, 8G, etc.
7655 */
7656 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7659 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7660 u32 base = (u32) mapping & 0xffffffff;
7662 return ((base + len + (mss & 0x3fff)) < base);
7667 /* Test for DMA addresses > 40-bit */
7668 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7671 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7672 if (tg3_flag(tp, 40BIT_DMA_BUG))
7673 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7680 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7681 dma_addr_t mapping, u32 len, u32 flags,
7684 txbd->addr_hi = ((u64) mapping >> 32);
7685 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7686 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7687 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7690 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7691 dma_addr_t map, u32 len, u32 flags,
7694 struct tg3 *tp = tnapi->tp;
7697 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7700 if (tg3_4g_overflow_test(map, len))
7703 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7706 if (tg3_40bit_overflow_test(tp, map, len))
7709 if (tp->dma_limit) {
7710 u32 prvidx = *entry;
7711 u32 tmp_flag = flags & ~TXD_FLAG_END;
7712 while (len > tp->dma_limit && *budget) {
7713 u32 frag_len = tp->dma_limit;
7714 len -= tp->dma_limit;
7716 /* Avoid the 8-byte DMA problem */
7718 len += tp->dma_limit / 2;
7719 frag_len = tp->dma_limit / 2;
7722 tnapi->tx_buffers[*entry].fragmented = true;
7724 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7725 frag_len, tmp_flag, mss, vlan);
7728 *entry = NEXT_TX(*entry);
7735 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7736 len, flags, mss, vlan);
7738 *entry = NEXT_TX(*entry);
7741 tnapi->tx_buffers[prvidx].fragmented = false;
7745 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7746 len, flags, mss, vlan);
7747 *entry = NEXT_TX(*entry);
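/* Illustrative sketch (editor's example, not driver code): the
 * chunking tg3_tx_frag_set() above performs when tp->dma_limit is set.
 * A segment longer than the limit is emitted as several descriptors;
 * this helper just counts how many descriptors one segment costs
 * (the real code additionally halves the last full chunk to dodge the
 * 8-byte DMA bug).  Assumes limit != 0.
 */
static u32 __maybe_unused tg3_example_dma_chunks(u32 len, u32 limit)
{
	u32 chunks = 0;

	while (len > limit) {
		len -= limit;
		chunks++;
	}

	return chunks + 1;	/* the final (possibly short) chunk */
}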
7753 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7756 struct sk_buff *skb;
7757 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7762 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7763 skb_headlen(skb), DMA_TO_DEVICE);
7765 while (txb->fragmented) {
7766 txb->fragmented = false;
7767 entry = NEXT_TX(entry);
7768 txb = &tnapi->tx_buffers[entry];
7771 for (i = 0; i <= last; i++) {
7772 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7774 entry = NEXT_TX(entry);
7775 txb = &tnapi->tx_buffers[entry];
7777 dma_unmap_page(&tnapi->tp->pdev->dev,
7778 dma_unmap_addr(txb, mapping),
7779 skb_frag_size(frag), DMA_TO_DEVICE);
7781 while (txb->fragmented) {
7782 txb->fragmented = false;
7783 entry = NEXT_TX(entry);
7784 txb = &tnapi->tx_buffers[entry];
7789 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7790 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7791 struct sk_buff **pskb,
7792 u32 *entry, u32 *budget,
7793 u32 base_flags, u32 mss, u32 vlan)
7795 struct tg3 *tp = tnapi->tp;
7796 struct sk_buff *new_skb, *skb = *pskb;
7797 dma_addr_t new_addr = 0;
7800 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7801 new_skb = skb_copy(skb, GFP_ATOMIC);
7803 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7805 new_skb = skb_copy_expand(skb,
7806 skb_headroom(skb) + more_headroom,
7807 skb_tailroom(skb), GFP_ATOMIC);
7813 /* New SKB is guaranteed to be linear. */
7814 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7815 new_skb->len, DMA_TO_DEVICE);
7816 /* Make sure the mapping succeeded */
7817 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7818 dev_kfree_skb_any(new_skb);
7821 u32 save_entry = *entry;
7823 base_flags |= TXD_FLAG_END;
7825 tnapi->tx_buffers[*entry].skb = new_skb;
7826 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7829 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7830 new_skb->len, base_flags,
7832 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7833 dev_kfree_skb_any(new_skb);
7839 dev_consume_skb_any(skb);
7844 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7846 /* Check if we will never have enough descriptors,
7847 * as gso_segs can exceed the current ring size.
7848 */
7849 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
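/* Worked example for the check above: tg3_tso_bug() may consume up to
 * gso_segs * 3 descriptors, so with a tx_pending of, say, 510 the
 * fallback is only viable for skbs with gso_segs < 510 / 3 == 170.
 * A bigger skb could never fit even in an empty ring, and waiting for
 * that many free descriptors would stall forever.
 */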
7852 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7854 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7855 * indicated in tg3_tx_frag_set()
7857 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7858 struct netdev_queue *txq, struct sk_buff *skb)
7860 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7861 struct sk_buff *segs, *seg, *next;
7863 /* Estimate the number of fragments in the worst case */
7864 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7865 netif_tx_stop_queue(txq);
7867 /* netif_tx_stop_queue() must be done before checking
7868 * tx index in tg3_tx_avail() below, because in
7869 * tg3_tx(), we update tx index before checking for
7870 * netif_tx_queue_stopped().
7871 */
7873 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7874 return NETDEV_TX_BUSY;
7876 netif_tx_wake_queue(txq);
7879 segs = skb_gso_segment(skb, tp->dev->features &
7880 ~(NETIF_F_TSO | NETIF_F_TSO6));
7881 if (IS_ERR(segs) || !segs)
7882 goto tg3_tso_bug_end;
7884 skb_list_walk_safe(segs, seg, next) {
7885 skb_mark_not_on_list(seg);
7886 tg3_start_xmit(seg, tp->dev);
7890 dev_consume_skb_any(skb);
7892 return NETDEV_TX_OK;
7895 /* hard_start_xmit for all devices */
7896 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7898 struct tg3 *tp = netdev_priv(dev);
7899 u32 len, entry, base_flags, mss, vlan = 0;
7901 int i = -1, would_hit_hwbug;
7903 struct tg3_napi *tnapi;
7904 struct netdev_queue *txq;
7906 struct iphdr *iph = NULL;
7907 struct tcphdr *tcph = NULL;
7908 __sum16 tcp_csum = 0, ip_csum = 0;
7909 __be16 ip_tot_len = 0;
7911 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7912 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7913 if (tg3_flag(tp, ENABLE_TSS))
7916 budget = tg3_tx_avail(tnapi);
7918 /* We are running in BH disabled context with netif_tx_lock
7919 * and TX reclaim runs via tp->napi.poll inside of a software
7920 * interrupt. Furthermore, IRQ processing runs lockless so we have
7921 * no IRQ context deadlocks to worry about either. Rejoice!
7923 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7924 if (!netif_tx_queue_stopped(txq)) {
7925 netif_tx_stop_queue(txq);
7927 /* This is a hard error, log it. */
7929 "BUG! Tx Ring full when queue awake!\n");
7931 return NETDEV_TX_BUSY;
7934 entry = tnapi->tx_prod;
7937 mss = skb_shinfo(skb)->gso_size;
7939 u32 tcp_opt_len, hdr_len;
7941 if (skb_cow_head(skb, 0))
7945 tcp_opt_len = tcp_optlen(skb);
7947 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7949 /* HW/FW cannot correctly segment packets that have been
7950 * vlan encapsulated.
7952 if (skb->protocol == htons(ETH_P_8021Q) ||
7953 skb->protocol == htons(ETH_P_8021AD)) {
7954 if (tg3_tso_bug_gso_check(tnapi, skb))
7955 return tg3_tso_bug(tp, tnapi, txq, skb);
7959 if (!skb_is_gso_v6(skb)) {
7960 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7961 tg3_flag(tp, TSO_BUG)) {
7962 if (tg3_tso_bug_gso_check(tnapi, skb))
7963 return tg3_tso_bug(tp, tnapi, txq, skb);
7966 ip_csum = iph->check;
7967 ip_tot_len = iph->tot_len;
7969 iph->tot_len = htons(mss + hdr_len);
7972 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7973 TXD_FLAG_CPU_POST_DMA);
7975 tcph = tcp_hdr(skb);
7976 tcp_csum = tcph->check;
7978 if (tg3_flag(tp, HW_TSO_1) ||
7979 tg3_flag(tp, HW_TSO_2) ||
7980 tg3_flag(tp, HW_TSO_3)) {
7982 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7984 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7988 if (tg3_flag(tp, HW_TSO_3)) {
7989 mss |= (hdr_len & 0xc) << 12;
7991 base_flags |= 0x00000010;
7992 base_flags |= (hdr_len & 0x3e0) << 5;
7993 } else if (tg3_flag(tp, HW_TSO_2))
7994 mss |= hdr_len << 9;
7995 else if (tg3_flag(tp, HW_TSO_1) ||
7996 tg3_asic_rev(tp) == ASIC_REV_5705) {
7997 if (tcp_opt_len || iph->ihl > 5) {
8000 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8001 mss |= (tsflags << 11);
8004 if (tcp_opt_len || iph->ihl > 5) {
8007 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8008 base_flags |= tsflags << 12;
8011 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8012 /* HW/FW cannot correctly checksum packets that have been
8013 * vlan encapsulated.
8015 if (skb->protocol == htons(ETH_P_8021Q) ||
8016 skb->protocol == htons(ETH_P_8021AD)) {
8017 if (skb_checksum_help(skb))
8020 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8024 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8025 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8026 base_flags |= TXD_FLAG_JMB_PKT;
8028 if (skb_vlan_tag_present(skb)) {
8029 base_flags |= TXD_FLAG_VLAN;
8030 vlan = skb_vlan_tag_get(skb);
8033 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8034 tg3_flag(tp, TX_TSTAMP_EN)) {
8035 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8036 base_flags |= TXD_FLAG_HWTSTAMP;
8039 len = skb_headlen(skb);
8041 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8043 if (dma_mapping_error(&tp->pdev->dev, mapping))
8047 tnapi->tx_buffers[entry].skb = skb;
8048 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8050 would_hit_hwbug = 0;
8052 if (tg3_flag(tp, 5701_DMA_BUG))
8053 would_hit_hwbug = 1;
8055 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8056 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8058 would_hit_hwbug = 1;
8059 } else if (skb_shinfo(skb)->nr_frags > 0) {
8062 if (!tg3_flag(tp, HW_TSO_1) &&
8063 !tg3_flag(tp, HW_TSO_2) &&
8064 !tg3_flag(tp, HW_TSO_3))
8067 /* Now loop through additional data
8068 * fragments, and queue them.
8070 last = skb_shinfo(skb)->nr_frags - 1;
8071 for (i = 0; i <= last; i++) {
8072 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8074 len = skb_frag_size(frag);
8075 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8076 len, DMA_TO_DEVICE);
8078 tnapi->tx_buffers[entry].skb = NULL;
8079 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8081 if (dma_mapping_error(&tp->pdev->dev, mapping))
8085 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8087 ((i == last) ? TXD_FLAG_END : 0),
8089 would_hit_hwbug = 1;
8095 if (would_hit_hwbug) {
8096 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8098 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8099 /* If it's a TSO packet, do GSO instead of
8100 * allocating and copying to a large linear SKB
8103 iph->check = ip_csum;
8104 iph->tot_len = ip_tot_len;
8106 tcph->check = tcp_csum;
8107 return tg3_tso_bug(tp, tnapi, txq, skb);
8110 /* If the workaround fails due to memory/mapping
8111 * failure, silently drop this packet.
8113 entry = tnapi->tx_prod;
8114 budget = tg3_tx_avail(tnapi);
8115 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8116 base_flags, mss, vlan))
8120 skb_tx_timestamp(skb);
8121 netdev_tx_sent_queue(txq, skb->len);
8123 /* Sync BD data before updating mailbox */
8126 tnapi->tx_prod = entry;
8127 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8128 netif_tx_stop_queue(txq);
8130 /* netif_tx_stop_queue() must be done before checking
8131 * tx index in tg3_tx_avail() below, because in
8132 * tg3_tx(), we update tx index before checking for
8133 * netif_tx_queue_stopped().
8134 */
8136 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8137 netif_tx_wake_queue(txq);
8140 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8141 /* Packets are ready, update Tx producer idx on card. */
8142 tw32_tx_mbox(tnapi->prodmbox, entry);
8145 return NETDEV_TX_OK;
8148 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8149 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8151 dev_kfree_skb_any(skb);
8154 return NETDEV_TX_OK;
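/* Illustrative sketch (editor's example, not driver code): the
 * stop-then-recheck idiom tg3_start_xmit() uses above.  The transmit
 * path stops the queue *before* re-reading the free-descriptor count,
 * and the completion path updates that count *before* testing
 * netif_tx_queue_stopped(); whichever side runs second sees the
 * other's update, so the queue cannot stall with room available.
 * The avail callback is a hypothetical stand-in for tg3_tx_avail().
 */
static void __maybe_unused tg3_example_stop_wake(struct netdev_queue *txq,
						 u32 (*avail)(void *),
						 void *priv, u32 thresh)
{
	if (avail(priv) > thresh)
		return;

	netif_tx_stop_queue(txq);

	/* re-read after stopping; a completion may have freed slots */
	if (avail(priv) > thresh)
		netif_tx_wake_queue(txq);
}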
8157 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8160 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8161 MAC_MODE_PORT_MODE_MASK);
8163 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8165 if (!tg3_flag(tp, 5705_PLUS))
8166 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8168 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8169 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8171 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8173 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8175 if (tg3_flag(tp, 5705_PLUS) ||
8176 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8177 tg3_asic_rev(tp) == ASIC_REV_5700)
8178 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8181 tw32(MAC_MODE, tp->mac_mode);
8185 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8187 u32 val, bmcr, mac_mode, ptest = 0;
8189 tg3_phy_toggle_apd(tp, false);
8190 tg3_phy_toggle_automdix(tp, false);
8192 if (extlpbk && tg3_phy_set_extloopbk(tp))
8195 bmcr = BMCR_FULLDPLX;
8200 bmcr |= BMCR_SPEED100;
8204 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8206 bmcr |= BMCR_SPEED100;
8209 bmcr |= BMCR_SPEED1000;
8214 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8215 tg3_readphy(tp, MII_CTRL1000, &val);
8216 val |= CTL1000_AS_MASTER |
8217 CTL1000_ENABLE_MASTER;
8218 tg3_writephy(tp, MII_CTRL1000, val);
8220 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8221 MII_TG3_FET_PTEST_TRIM_2;
8222 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8225 bmcr |= BMCR_LOOPBACK;
8227 tg3_writephy(tp, MII_BMCR, bmcr);
8229 /* The write needs to be flushed for the FETs */
8230 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8231 tg3_readphy(tp, MII_BMCR, &bmcr);
8235 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8236 tg3_asic_rev(tp) == ASIC_REV_5785) {
8237 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8238 MII_TG3_FET_PTEST_FRC_TX_LINK |
8239 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8241 /* The write needs to be flushed for the AC131 */
8242 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8245 /* Reset to prevent losing 1st rx packet intermittently */
8246 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8247 tg3_flag(tp, 5780_CLASS)) {
8248 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8250 tw32_f(MAC_RX_MODE, tp->rx_mode);
8253 mac_mode = tp->mac_mode &
8254 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8255 if (speed == SPEED_1000)
8256 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8258 mac_mode |= MAC_MODE_PORT_MODE_MII;
8260 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8261 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8263 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8264 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8265 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8266 mac_mode |= MAC_MODE_LINK_POLARITY;
8268 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8269 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8272 tw32(MAC_MODE, mac_mode);
8278 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8280 struct tg3 *tp = netdev_priv(dev);
8282 if (features & NETIF_F_LOOPBACK) {
8283 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8286 spin_lock_bh(&tp->lock);
8287 tg3_mac_loopback(tp, true);
8288 netif_carrier_on(tp->dev);
8289 spin_unlock_bh(&tp->lock);
8290 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8292 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8295 spin_lock_bh(&tp->lock);
8296 tg3_mac_loopback(tp, false);
8297 /* Force link status check */
8298 tg3_setup_phy(tp, true);
8299 spin_unlock_bh(&tp->lock);
8300 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8304 static netdev_features_t tg3_fix_features(struct net_device *dev,
8305 netdev_features_t features)
8307 struct tg3 *tp = netdev_priv(dev);
8309 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8310 features &= ~NETIF_F_ALL_TSO;
8315 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8317 netdev_features_t changed = dev->features ^ features;
8319 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8320 tg3_set_loopback(dev, features);
8325 static void tg3_rx_prodring_free(struct tg3 *tp,
8326 struct tg3_rx_prodring_set *tpr)
8330 if (tpr != &tp->napi[0].prodring) {
8331 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8332 i = (i + 1) & tp->rx_std_ring_mask)
8333 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8336 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8337 for (i = tpr->rx_jmb_cons_idx;
8338 i != tpr->rx_jmb_prod_idx;
8339 i = (i + 1) & tp->rx_jmb_ring_mask) {
8340 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8348 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8349 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8352 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8353 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8354 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8359 /* Initialize rx rings for packet processing.
8361 * The chip has been shut down and the driver detached from
8362 * the networking, so no interrupts or new tx packets will
8363 * end up in the driver. tp->{tx,}lock are held and thus
8364 * we may not sleep.
8365 */
8366 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8367 struct tg3_rx_prodring_set *tpr)
8369 u32 i, rx_pkt_dma_sz;
8371 tpr->rx_std_cons_idx = 0;
8372 tpr->rx_std_prod_idx = 0;
8373 tpr->rx_jmb_cons_idx = 0;
8374 tpr->rx_jmb_prod_idx = 0;
8376 if (tpr != &tp->napi[0].prodring) {
8377 memset(&tpr->rx_std_buffers[0], 0,
8378 TG3_RX_STD_BUFF_RING_SIZE(tp));
8379 if (tpr->rx_jmb_buffers)
8380 memset(&tpr->rx_jmb_buffers[0], 0,
8381 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8385 /* Zero out all descriptors. */
8386 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8388 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8389 if (tg3_flag(tp, 5780_CLASS) &&
8390 tp->dev->mtu > ETH_DATA_LEN)
8391 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8392 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8394 /* Initialize invariants of the rings; we only set this
8395 * stuff once. This works because the card does not
8396 * write into the rx buffer posting rings.
8398 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8399 struct tg3_rx_buffer_desc *rxd;
8401 rxd = &tpr->rx_std[i];
8402 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8403 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8404 rxd->opaque = (RXD_OPAQUE_RING_STD |
8405 (i << RXD_OPAQUE_INDEX_SHIFT));
8408 /* Now allocate fresh SKBs for each rx ring. */
8409 for (i = 0; i < tp->rx_pending; i++) {
8410 unsigned int frag_size;
8412 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8414 netdev_warn(tp->dev,
8415 "Using a smaller RX standard ring. Only "
8416 "%d out of %d buffers were allocated "
8417 "successfully\n", i, tp->rx_pending);
8425 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8428 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8430 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8433 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8434 struct tg3_rx_buffer_desc *rxd;
8436 rxd = &tpr->rx_jmb[i].std;
8437 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8438 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8440 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8441 (i << RXD_OPAQUE_INDEX_SHIFT));
8444 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8445 unsigned int frag_size;
8447 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8449 netdev_warn(tp->dev,
8450 "Using a smaller RX jumbo ring. Only %d "
8451 "out of %d buffers were allocated "
8452 "successfully\n", i, tp->rx_jumbo_pending);
8455 tp->rx_jumbo_pending = i;
8464 tg3_rx_prodring_free(tp, tpr);
8468 static void tg3_rx_prodring_fini(struct tg3 *tp,
8469 struct tg3_rx_prodring_set *tpr)
8471 kfree(tpr->rx_std_buffers);
8472 tpr->rx_std_buffers = NULL;
8473 kfree(tpr->rx_jmb_buffers);
8474 tpr->rx_jmb_buffers = NULL;
8476 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8477 tpr->rx_std, tpr->rx_std_mapping);
8481 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8482 tpr->rx_jmb, tpr->rx_jmb_mapping);
8487 static int tg3_rx_prodring_init(struct tg3 *tp,
8488 struct tg3_rx_prodring_set *tpr)
8490 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8492 if (!tpr->rx_std_buffers)
8495 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8496 TG3_RX_STD_RING_BYTES(tp),
8497 &tpr->rx_std_mapping,
8502 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8503 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8505 if (!tpr->rx_jmb_buffers)
8508 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8509 TG3_RX_JMB_RING_BYTES(tp),
8510 &tpr->rx_jmb_mapping,
8519 tg3_rx_prodring_fini(tp, tpr);
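/* Illustrative sketch (editor's example, not driver code): the
 * coherent-ring allocation pattern used by tg3_rx_prodring_init()
 * above.  dma_alloc_coherent() hands back a CPU virtual address and
 * fills in the bus address the NIC is programmed with; the identical
 * size/virt/handle triple must be passed to dma_free_coherent().
 * The struct here is hypothetical.
 */
struct tg3_example_ring {
	void *desc;
	dma_addr_t desc_map;
	size_t bytes;
};

static int __maybe_unused
tg3_example_ring_alloc(struct device *dev, struct tg3_example_ring *r,
		       size_t bytes)
{
	r->bytes = bytes;
	r->desc = dma_alloc_coherent(dev, bytes, &r->desc_map, GFP_KERNEL);
	return r->desc ? 0 : -ENOMEM;
}

static void __maybe_unused
tg3_example_ring_free(struct device *dev, struct tg3_example_ring *r)
{
	if (!r->desc)
		return;
	dma_free_coherent(dev, r->bytes, r->desc, r->desc_map);
	r->desc = NULL;
}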
8523 /* Free up pending packets in all rx/tx rings.
8525 * The chip has been shut down and the driver detached from
8526 * the networking, so no interrupts or new tx packets will
8527 * end up in the driver. tp->{tx,}lock is not held and we are not
8528 * in an interrupt context and thus may sleep.
8530 static void tg3_free_rings(struct tg3 *tp)
8534 for (j = 0; j < tp->irq_cnt; j++) {
8535 struct tg3_napi *tnapi = &tp->napi[j];
8537 tg3_rx_prodring_free(tp, &tnapi->prodring);
8539 if (!tnapi->tx_buffers)
8542 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8543 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8548 tg3_tx_skb_unmap(tnapi, i,
8549 skb_shinfo(skb)->nr_frags - 1);
8551 dev_consume_skb_any(skb);
8553 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8557 /* Initialize tx/rx rings for packet processing.
8559 * The chip has been shut down and the driver detached from
8560 * the networking, so no interrupts or new tx packets will
8561 * end up in the driver. tp->{tx,}lock are held and thus
8562 * we may not sleep.
8563 */
8564 static int tg3_init_rings(struct tg3 *tp)
8568 /* Free up all the SKBs. */
8571 for (i = 0; i < tp->irq_cnt; i++) {
8572 struct tg3_napi *tnapi = &tp->napi[i];
8574 tnapi->last_tag = 0;
8575 tnapi->last_irq_tag = 0;
8576 tnapi->hw_status->status = 0;
8577 tnapi->hw_status->status_tag = 0;
8578 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8583 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8585 tnapi->rx_rcb_ptr = 0;
8587 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8589 if (tnapi->prodring.rx_std &&
8590 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8599 static void tg3_mem_tx_release(struct tg3 *tp)
8603 for (i = 0; i < tp->irq_max; i++) {
8604 struct tg3_napi *tnapi = &tp->napi[i];
8606 if (tnapi->tx_ring) {
8607 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8608 tnapi->tx_ring, tnapi->tx_desc_mapping);
8609 tnapi->tx_ring = NULL;
8612 kfree(tnapi->tx_buffers);
8613 tnapi->tx_buffers = NULL;
8617 static int tg3_mem_tx_acquire(struct tg3 *tp)
8620 struct tg3_napi *tnapi = &tp->napi[0];
8622 /* If multivector TSS is enabled, vector 0 does not handle
8623 * tx interrupts. Don't allocate any resources for it.
8625 if (tg3_flag(tp, ENABLE_TSS))
8628 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8629 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8630 sizeof(struct tg3_tx_ring_info),
8632 if (!tnapi->tx_buffers)
8635 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8637 &tnapi->tx_desc_mapping,
8639 if (!tnapi->tx_ring)
8646 tg3_mem_tx_release(tp);
8650 static void tg3_mem_rx_release(struct tg3 *tp)
8654 for (i = 0; i < tp->irq_max; i++) {
8655 struct tg3_napi *tnapi = &tp->napi[i];
8657 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8662 dma_free_coherent(&tp->pdev->dev,
8663 TG3_RX_RCB_RING_BYTES(tp),
8665 tnapi->rx_rcb_mapping);
8666 tnapi->rx_rcb = NULL;
8670 static int tg3_mem_rx_acquire(struct tg3 *tp)
8672 unsigned int i, limit;
8674 limit = tp->rxq_cnt;
8676 /* If RSS is enabled, we need a (dummy) producer ring
8677 * set on vector zero. This is the true hw prodring.
8679 if (tg3_flag(tp, ENABLE_RSS))
8682 for (i = 0; i < limit; i++) {
8683 struct tg3_napi *tnapi = &tp->napi[i];
8685 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8688 /* If multivector RSS is enabled, vector 0
8689 * does not handle rx or tx interrupts.
8690 * Don't allocate any resources for it.
8692 if (!i && tg3_flag(tp, ENABLE_RSS))
8695 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8696 TG3_RX_RCB_RING_BYTES(tp),
8697 &tnapi->rx_rcb_mapping,
8706 tg3_mem_rx_release(tp);
8711 * Must not be invoked with interrupt sources disabled and
8712 * the hardware shut down.
8713 */
8714 static void tg3_free_consistent(struct tg3 *tp)
8718 for (i = 0; i < tp->irq_cnt; i++) {
8719 struct tg3_napi *tnapi = &tp->napi[i];
8721 if (tnapi->hw_status) {
8722 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8724 tnapi->status_mapping);
8725 tnapi->hw_status = NULL;
8729 tg3_mem_rx_release(tp);
8730 tg3_mem_tx_release(tp);
8732 /* tp->hw_stats can be referenced safely:
8733 * 1. under rtnl_lock
8734 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8737 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8738 tp->hw_stats, tp->stats_mapping);
8739 tp->hw_stats = NULL;
8744 * Must not be invoked with interrupt sources disabled and
8745 * the hardware shut down. Can sleep.
8746 */
8747 static int tg3_alloc_consistent(struct tg3 *tp)
8751 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8752 sizeof(struct tg3_hw_stats),
8753 &tp->stats_mapping, GFP_KERNEL);
8757 for (i = 0; i < tp->irq_cnt; i++) {
8758 struct tg3_napi *tnapi = &tp->napi[i];
8759 struct tg3_hw_status *sblk;
8761 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8763 &tnapi->status_mapping,
8765 if (!tnapi->hw_status)
8768 sblk = tnapi->hw_status;
8770 if (tg3_flag(tp, ENABLE_RSS)) {
8771 u16 *prodptr = NULL;
8774 * When RSS is enabled, the status block format changes
8775 * slightly. The "rx_jumbo_consumer", "reserved",
8776 * and "rx_mini_consumer" members get mapped to the
8777 * other three rx return ring producer indexes.
8781 prodptr = &sblk->idx[0].rx_producer;
8784 prodptr = &sblk->rx_jumbo_consumer;
8787 prodptr = &sblk->reserved;
8790 prodptr = &sblk->rx_mini_consumer;
8793 tnapi->rx_rcb_prod_idx = prodptr;
8795 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8799 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8805 tg3_free_consistent(tp);
8809 #define MAX_WAIT_CNT 1000
8811 /* To stop a block, clear the enable bit and poll till it
8812 * clears. tp->lock is held.
8814 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8819 if (tg3_flag(tp, 5705_PLUS)) {
8826 /* We can't enable/disable these bits of the
8827 * 5705/5750, just say success.
8840 for (i = 0; i < MAX_WAIT_CNT; i++) {
8841 if (pci_channel_offline(tp->pdev)) {
8842 dev_err(&tp->pdev->dev,
8843 "tg3_stop_block device offline, "
8844 "ofs=%lx enable_bit=%x\n",
8851 if ((val & enable_bit) == 0)
8855 if (i == MAX_WAIT_CNT && !silent) {
8856 dev_err(&tp->pdev->dev,
8857 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8865 /* tp->lock is held. */
8866 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8870 tg3_disable_ints(tp);
8872 if (pci_channel_offline(tp->pdev)) {
8873 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8874 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8879 tp->rx_mode &= ~RX_MODE_ENABLE;
8880 tw32_f(MAC_RX_MODE, tp->rx_mode);
8883 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8884 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8885 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8886 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8887 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8888 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8890 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8891 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8892 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8893 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8894 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8895 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8896 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8898 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8899 tw32_f(MAC_MODE, tp->mac_mode);
8902 tp->tx_mode &= ~TX_MODE_ENABLE;
8903 tw32_f(MAC_TX_MODE, tp->tx_mode);
8905 for (i = 0; i < MAX_WAIT_CNT; i++) {
8907 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8910 if (i >= MAX_WAIT_CNT) {
8911 dev_err(&tp->pdev->dev,
8912 "%s timed out, TX_MODE_ENABLE will not clear "
8913 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8917 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8918 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8919 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8921 tw32(FTQ_RESET, 0xffffffff);
8922 tw32(FTQ_RESET, 0x00000000);
8924 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8925 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8928 for (i = 0; i < tp->irq_cnt; i++) {
8929 struct tg3_napi *tnapi = &tp->napi[i];
8930 if (tnapi->hw_status)
8931 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8937 /* Save PCI command register before chip reset */
8938 static void tg3_save_pci_state(struct tg3 *tp)
8940 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8943 /* Restore PCI state after chip reset */
8944 static void tg3_restore_pci_state(struct tg3 *tp)
8948 /* Re-enable indirect register accesses. */
8949 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8950 tp->misc_host_ctrl);
8952 /* Set MAX PCI retry to zero. */
8953 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8954 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8955 tg3_flag(tp, PCIX_MODE))
8956 val |= PCISTATE_RETRY_SAME_DMA;
8957 /* Allow reads and writes to the APE register and memory space. */
8958 if (tg3_flag(tp, ENABLE_APE))
8959 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8960 PCISTATE_ALLOW_APE_SHMEM_WR |
8961 PCISTATE_ALLOW_APE_PSPACE_WR;
8962 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8964 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8966 if (!tg3_flag(tp, PCI_EXPRESS)) {
8967 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8968 tp->pci_cacheline_sz);
8969 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8973 /* Make sure PCI-X relaxed ordering bit is clear. */
8974 if (tg3_flag(tp, PCIX_MODE)) {
8977 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8979 pcix_cmd &= ~PCI_X_CMD_ERO;
8980 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8984 if (tg3_flag(tp, 5780_CLASS)) {
8986 /* Chip reset on 5780 will reset MSI enable bit,
8987 * so we need to restore it.
8988 */
8989 if (tg3_flag(tp, USING_MSI)) {
8992 pci_read_config_word(tp->pdev,
8993 tp->msi_cap + PCI_MSI_FLAGS,
8995 pci_write_config_word(tp->pdev,
8996 tp->msi_cap + PCI_MSI_FLAGS,
8997 ctrl | PCI_MSI_FLAGS_ENABLE);
8998 val = tr32(MSGINT_MODE);
8999 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9004 static void tg3_override_clk(struct tg3 *tp)
9008 switch (tg3_asic_rev(tp)) {
9010 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9011 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9012 TG3_CPMU_MAC_ORIDE_ENABLE);
9017 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9025 static void tg3_restore_clk(struct tg3 *tp)
9029 switch (tg3_asic_rev(tp)) {
9031 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9032 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9033 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9038 val = tr32(TG3_CPMU_CLCK_ORIDE);
9039 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9047 /* tp->lock is held. */
9048 static int tg3_chip_reset(struct tg3 *tp)
9049 __releases(tp->lock)
9050 __acquires(tp->lock)
9053 void (*write_op)(struct tg3 *, u32, u32);
9056 if (!pci_device_is_present(tp->pdev))
9061 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9063 /* No matching tg3_nvram_unlock() after this because
9064 * chip reset below will undo the nvram lock.
9066 tp->nvram_lock_cnt = 0;
9068 /* GRC_MISC_CFG core clock reset will clear the memory
9069 * enable bit in PCI register 4 and the MSI enable bit
9070 * on some chips, so we save relevant registers here.
9072 tg3_save_pci_state(tp);
9074 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9075 tg3_flag(tp, 5755_PLUS))
9076 tw32(GRC_FASTBOOT_PC, 0);
9079 * We must avoid the readl() that normally takes place.
9080 * It locks machines, causes machine checks, and other
9081 * fun things. So, temporarily disable the 5701
9082 * hardware workaround while we do the reset.
9083 */
9084 write_op = tp->write32;
9085 if (write_op == tg3_write_flush_reg32)
9086 tp->write32 = tg3_write32;
9088 /* Prevent the irq handler from reading or writing PCI registers
9089 * during chip reset when the memory enable bit in the PCI command
9090 * register may be cleared. The chip does not generate interrupts
9091 * at this time, but the irq handler may still be called due to irq
9092 * sharing or irqpoll.
9094 tg3_flag_set(tp, CHIP_RESETTING);
9095 for (i = 0; i < tp->irq_cnt; i++) {
9096 struct tg3_napi *tnapi = &tp->napi[i];
9097 if (tnapi->hw_status) {
9098 tnapi->hw_status->status = 0;
9099 tnapi->hw_status->status_tag = 0;
9101 tnapi->last_tag = 0;
9102 tnapi->last_irq_tag = 0;
9106 tg3_full_unlock(tp);
9108 for (i = 0; i < tp->irq_cnt; i++)
9109 synchronize_irq(tp->napi[i].irq_vec);
9111 tg3_full_lock(tp, 0);
9113 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9114 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9115 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9119 val = GRC_MISC_CFG_CORECLK_RESET;
9121 if (tg3_flag(tp, PCI_EXPRESS)) {
9122 /* Force PCIe 1.0a mode */
9123 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9124 !tg3_flag(tp, 57765_PLUS) &&
9125 tr32(TG3_PCIE_PHY_TSTCTL) ==
9126 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9127 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9129 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9130 tw32(GRC_MISC_CFG, (1 << 29));
9135 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9136 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9137 tw32(GRC_VCPU_EXT_CTRL,
9138 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9141 /* Set the clock to the highest frequency to avoid timeouts. With link
9142 * aware mode, the clock speed could be slow and the bootcode may not
9143 * complete within the expected time. Override the clock to allow the
9144 * bootcode to finish sooner and then restore it.
9146 tg3_override_clk(tp);
9148 /* Manage gphy power for all CPMU absent PCIe devices. */
9149 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9150 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9152 tw32(GRC_MISC_CFG, val);
9154 /* restore 5701 hardware bug workaround write method */
9155 tp->write32 = write_op;
9157 /* Unfortunately, we have to delay before the PCI read back.
9158 * Some 575X chips will not even respond to a PCI cfg access
9159 * when the reset command is given to the chip.
9161 * How do these hardware designers expect things to work
9162 * properly if the PCI write is posted for a long period
9163 * of time? It is always necessary to have some method by
9164 * which a register read back can occur to push the write
9165 * out which does the reset.
9167 * For most tg3 variants the trick below has worked.
9172 /* Flush PCI posted writes. The normal MMIO registers
9173 * are inaccessible at this time so this is the only
9174 * way to do this reliably (actually, this is no longer
9175 * the case, see above). I tried to use indirect
9176 * register read/write but this upset some 5701 variants.
9178 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9182 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9185 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9189 /* Wait for link training to complete. */
9190 for (j = 0; j < 5000; j++)
9193 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9194 pci_write_config_dword(tp->pdev, 0xc4,
9195 cfg_val | (1 << 15));
9198 /* Clear the "no snoop" and "relaxed ordering" bits. */
9199 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9201 * Older PCIe devices only support the 128-byte
9202 * MPS setting. Enforce the restriction.
9204 if (!tg3_flag(tp, CPMU_PRESENT))
9205 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9206 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9208 /* Clear error status */
9209 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9210 PCI_EXP_DEVSTA_CED |
9211 PCI_EXP_DEVSTA_NFED |
9212 PCI_EXP_DEVSTA_FED |
9213 PCI_EXP_DEVSTA_URD);
9216 tg3_restore_pci_state(tp);
9218 tg3_flag_clear(tp, CHIP_RESETTING);
9219 tg3_flag_clear(tp, ERROR_PROCESSED);
9222 if (tg3_flag(tp, 5780_CLASS))
9223 val = tr32(MEMARB_MODE);
9224 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9226 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9228 tw32(0x5000, 0x400);
9231 if (tg3_flag(tp, IS_SSB_CORE)) {
9233 * BCM4785: In order to avoid repercussions from using
9234 * potentially defective internal ROM, stop the Rx RISC CPU,
9235 * which is not required for normal operation.
9236 */
9238 tg3_halt_cpu(tp, RX_CPU_BASE);
9241 err = tg3_poll_fw(tp);
9245 tw32(GRC_MODE, tp->grc_mode);
9247 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9250 tw32(0xc4, val | (1 << 15));
9253 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9254 tg3_asic_rev(tp) == ASIC_REV_5705) {
9255 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9256 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9257 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9258 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9261 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9262 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9264 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9265 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9270 tw32_f(MAC_MODE, val);
9273 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9277 if (tg3_flag(tp, PCI_EXPRESS) &&
9278 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9279 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9280 !tg3_flag(tp, 57765_PLUS)) {
9283 tw32(0x7c00, val | (1 << 25));
9286 tg3_restore_clk(tp);
9288 /* Increase the core clock speed to fix the tx timeout issue on 5762
9289 * with 100Mbps link speed.
9291 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9292 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9293 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9294 TG3_CPMU_MAC_ORIDE_ENABLE);
9297 /* Reprobe ASF enable state. */
9298 tg3_flag_clear(tp, ENABLE_ASF);
9299 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9300 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9302 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9303 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9304 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9307 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9308 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9309 tg3_flag_set(tp, ENABLE_ASF);
9310 tp->last_event_jiffies = jiffies;
9311 if (tg3_flag(tp, 5750_PLUS))
9312 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9314 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9315 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9316 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9317 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9318 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9325 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9326 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9327 static void __tg3_set_rx_mode(struct net_device *);
9329 /* tp->lock is held. */
9330 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9336 tg3_write_sig_pre_reset(tp, kind);
9338 tg3_abort_hw(tp, silent);
9339 err = tg3_chip_reset(tp);
9341 __tg3_set_mac_addr(tp, false);
9343 tg3_write_sig_legacy(tp, kind);
9344 tg3_write_sig_post_reset(tp, kind);
9347 /* Save the stats across chip resets... */
9348 tg3_get_nstats(tp, &tp->net_stats_prev);
9349 tg3_get_estats(tp, &tp->estats_prev);
9351 /* And make sure the next sample is new data */
9352 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9358 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9360 struct tg3 *tp = netdev_priv(dev);
9361 struct sockaddr *addr = p;
9363 bool skip_mac_1 = false;
9365 if (!is_valid_ether_addr(addr->sa_data))
9366 return -EADDRNOTAVAIL;
9368 eth_hw_addr_set(dev, addr->sa_data);
9370 if (!netif_running(dev))
9373 if (tg3_flag(tp, ENABLE_ASF)) {
9374 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9376 addr0_high = tr32(MAC_ADDR_0_HIGH);
9377 addr0_low = tr32(MAC_ADDR_0_LOW);
9378 addr1_high = tr32(MAC_ADDR_1_HIGH);
9379 addr1_low = tr32(MAC_ADDR_1_LOW);
9381 /* Skip MAC addr 1 if ASF is using it. */
9382 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9383 !(addr1_high == 0 && addr1_low == 0))
9386 spin_lock_bh(&tp->lock);
9387 __tg3_set_mac_addr(tp, skip_mac_1);
9388 __tg3_set_rx_mode(dev);
9389 spin_unlock_bh(&tp->lock);
9394 /* tp->lock is held. */
9395 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9396 dma_addr_t mapping, u32 maxlen_flags,
9400 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9401 ((u64) mapping >> 32));
9403 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9404 ((u64) mapping & 0xffffffff));
9406 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9409 if (!tg3_flag(tp, 5705_PLUS))
9411 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9416 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9420 if (!tg3_flag(tp, ENABLE_TSS)) {
9421 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9422 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9423 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9425 tw32(HOSTCC_TXCOL_TICKS, 0);
9426 tw32(HOSTCC_TXMAX_FRAMES, 0);
9427 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9429 for (; i < tp->txq_cnt; i++) {
9432 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9433 tw32(reg, ec->tx_coalesce_usecs);
9434 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9435 tw32(reg, ec->tx_max_coalesced_frames);
9436 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9437 tw32(reg, ec->tx_max_coalesced_frames_irq);
9441 for (; i < tp->irq_max - 1; i++) {
9442 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9443 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9444 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9448 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9451 u32 limit = tp->rxq_cnt;
9453 if (!tg3_flag(tp, ENABLE_RSS)) {
9454 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9455 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9456 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9459 tw32(HOSTCC_RXCOL_TICKS, 0);
9460 tw32(HOSTCC_RXMAX_FRAMES, 0);
9461 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9464 for (; i < limit; i++) {
9467 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9468 tw32(reg, ec->rx_coalesce_usecs);
9469 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9470 tw32(reg, ec->rx_max_coalesced_frames);
9471 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9472 tw32(reg, ec->rx_max_coalesced_frames_irq);
9475 for (; i < tp->irq_max - 1; i++) {
9476 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9477 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9478 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9482 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9484 tg3_coal_tx_init(tp, ec);
9485 tg3_coal_rx_init(tp, ec);
9487 if (!tg3_flag(tp, 5705_PLUS)) {
9488 u32 val = ec->stats_block_coalesce_usecs;
9490 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9491 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9496 tw32(HOSTCC_STAT_COAL_TICKS, val);
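/* Worked example for the per-vector coalescing writes above: each
 * extra MSI-X vector owns a register bank 0x18 bytes above the
 * previous one, so vector i (i = 0 for the first VEC1 bank) is
 * programmed at HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, and likewise for
 * the RX variants; the trailing loops zero the banks of unused
 * vectors with the same stride.
 */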
9500 /* tp->lock is held. */
9501 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9505 /* Disable all transmit rings but the first. */
9506 if (!tg3_flag(tp, 5705_PLUS))
9507 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9508 else if (tg3_flag(tp, 5717_PLUS))
9509 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9510 else if (tg3_flag(tp, 57765_CLASS) ||
9511 tg3_asic_rev(tp) == ASIC_REV_5762)
9512 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9514 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9516 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9517 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9518 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9519 BDINFO_FLAGS_DISABLED);
9522 /* tp->lock is held. */
9523 static void tg3_tx_rcbs_init(struct tg3 *tp)
9526 u32 txrcb = NIC_SRAM_SEND_RCB;
9528 if (tg3_flag(tp, ENABLE_TSS))
9531 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9532 struct tg3_napi *tnapi = &tp->napi[i];
9534 if (!tnapi->tx_ring)
9537 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9538 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9539 NIC_SRAM_TX_BUFFER_DESC);
9543 /* tp->lock is held. */
9544 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9548 /* Disable all receive return rings but the first. */
9549 if (tg3_flag(tp, 5717_PLUS))
9550 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9551 else if (!tg3_flag(tp, 5705_PLUS))
9552 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9553 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9554 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9555 tg3_flag(tp, 57765_CLASS))
9556 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9558 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9560 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9561 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9562 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9563 BDINFO_FLAGS_DISABLED);
9566 /* tp->lock is held. */
9567 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9570 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9572 if (tg3_flag(tp, ENABLE_RSS))
9575 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9576 struct tg3_napi *tnapi = &tp->napi[i];
9581 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9582 (tp->rx_ret_ring_mask + 1) <<
9583 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9587 /* tp->lock is held. */
9588 static void tg3_rings_reset(struct tg3 *tp)
9592 struct tg3_napi *tnapi = &tp->napi[0];
9594 tg3_tx_rcbs_disable(tp);
9596 tg3_rx_ret_rcbs_disable(tp);
9598 /* Disable interrupts */
9599 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9600 tp->napi[0].chk_msi_cnt = 0;
9601 tp->napi[0].last_rx_cons = 0;
9602 tp->napi[0].last_tx_cons = 0;
9604 /* Zero mailbox registers. */
9605 if (tg3_flag(tp, SUPPORT_MSIX)) {
9606 for (i = 1; i < tp->irq_max; i++) {
9607 tp->napi[i].tx_prod = 0;
9608 tp->napi[i].tx_cons = 0;
9609 if (tg3_flag(tp, ENABLE_TSS))
9610 tw32_mailbox(tp->napi[i].prodmbox, 0);
9611 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9612 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9613 tp->napi[i].chk_msi_cnt = 0;
9614 tp->napi[i].last_rx_cons = 0;
9615 tp->napi[i].last_tx_cons = 0;
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
9620 tp->napi[0].tx_prod = 0;
9621 tp->napi[0].tx_cons = 0;
9622 tw32_mailbox(tp->napi[0].prodmbox, 0);
9623 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9626 /* Make sure the NIC-based send BD rings are disabled. */
9627 if (!tg3_flag(tp, 5705_PLUS)) {
9628 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9629 for (i = 0; i < 16; i++)
9630 tw32_tx_mbox(mbox + i * 8, 0);
9633 /* Clear status block in ram. */
9634 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9636 /* Set status block DMA address */
9637 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9638 ((u64) tnapi->status_mapping >> 32));
9639 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9640 ((u64) tnapi->status_mapping & 0xffffffff));
9642 stblk = HOSTCC_STATBLCK_RING1;
9644 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9645 u64 mapping = (u64)tnapi->status_mapping;
9646 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9647 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9650 /* Clear status block in ram. */
9651 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9654 tg3_tx_rcbs_init(tp);
9655 tg3_rx_ret_rcbs_init(tp);
9658 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9660 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9662 if (!tg3_flag(tp, 5750_PLUS) ||
9663 tg3_flag(tp, 5780_CLASS) ||
9664 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9665 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9666 tg3_flag(tp, 57765_PLUS))
9667 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9668 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9669 tg3_asic_rev(tp) == ASIC_REV_5787)
9670 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9672 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9674 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9675 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9677 val = min(nic_rep_thresh, host_rep_thresh);
9678 tw32(RCVBDI_STD_THRESH, val);
9680 if (tg3_flag(tp, 57765_PLUS))
9681 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9688 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9690 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9691 tw32(RCVBDI_JUMBO_THRESH, val);
9693 if (tg3_flag(tp, 57765_PLUS))
9694 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
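/* Worked example (illustrative): with rx_pending == 200 and a 5700-class
 * BD cache, host_rep_thresh = max(200 / 8, 1) = 25, and the value written
 * to RCVBDI_STD_THRESH is min(bdcache_maxcnt / 2, 25) -- i.e. the chip
 * requests replenishment once the smaller of the two limits' worth of
 * standard ring buffers has been consumed.
 */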
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}
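/* Usage sketch (illustrative): __tg3_set_rx_mode() below feeds each
 * multicast address through this reflected (little-endian) CRC-32 and
 * keeps only seven bits of the complemented result:
 *
 *	u32 crc = calc_crc(ha->addr, ETH_ALEN);
 *	u32 bit = ~crc & 0x7f;	// selects 1 of the 128 hash filter bits
 */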
9721 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9723 /* accept or reject all multicast frames */
9724 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9725 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9726 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9727 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9730 static void __tg3_set_rx_mode(struct net_device *dev)
9732 struct tg3 *tp = netdev_priv(dev);
9735 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9736 RX_MODE_KEEP_VLAN_TAG);
9738 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9746 if (dev->flags & IFF_PROMISC) {
9747 /* Promiscuous mode. */
9748 rx_mode |= RX_MODE_PROMISC;
9749 } else if (dev->flags & IFF_ALLMULTI) {
9750 /* Accept all multicast. */
9751 tg3_set_multi(tp, 1);
9752 } else if (netdev_mc_empty(dev)) {
9753 /* Reject all multicast. */
9754 tg3_set_multi(tp, 0);
		/* Accept one or more multicast addresses. */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}
9771 tw32(MAC_HASH_REG_0, mc_filter[0]);
9772 tw32(MAC_HASH_REG_1, mc_filter[1]);
9773 tw32(MAC_HASH_REG_2, mc_filter[2]);
9774 tw32(MAC_HASH_REG_3, mc_filter[3]);
9777 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9778 rx_mode |= RX_MODE_PROMISC;
9779 } else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the MAC address filter list */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}
9791 if (rx_mode != tp->rx_mode) {
9792 tp->rx_mode = rx_mode;
9793 tw32_f(MAC_RX_MODE, rx_mode);
9798 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9802 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9803 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9806 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9810 if (!tg3_flag(tp, SUPPORT_MSIX))
9813 if (tp->rxq_cnt == 1) {
9814 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9818 /* Validate table against current IRQ count */
9819 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9820 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9824 if (i != TG3_RSS_INDIR_TBL_SIZE)
9825 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9828 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];

		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
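/* Illustrative note: each 32-bit MAC_RSS_INDIR_TBL_* register packs eight
 * 4-bit indirection entries, first entry in the most significant nibble,
 * so e.g. the entries {1, 0, 2, 3, 0, 1, 2, 3} are written as 0x10230123
 * and TG3_RSS_INDIR_TBL_SIZE entries span TG3_RSS_INDIR_TBL_SIZE / 8
 * consecutive registers.
 */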
9845 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9847 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9848 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9850 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9853 /* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9860 tg3_disable_ints(tp);
9864 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9866 if (tg3_flag(tp, INIT_COMPLETE))
9867 tg3_abort_hw(tp, 1);
9869 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9870 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9871 tg3_phy_pull_config(tp);
9872 tg3_eee_pull_config(tp, NULL);
9873 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);
9883 err = tg3_chip_reset(tp);
9887 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9889 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9890 val = tr32(TG3_CPMU_CTRL);
9891 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9892 tw32(TG3_CPMU_CTRL, val);
9894 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9895 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9896 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9897 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9899 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9900 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9901 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9902 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9904 val = tr32(TG3_CPMU_HST_ACC);
9905 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9906 val |= CPMU_HST_ACC_MACCLK_6_25;
9907 tw32(TG3_CPMU_HST_ACC, val);
9910 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9911 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9912 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9913 PCIE_PWR_MGMT_L1_THRESH_4MS;
9914 tw32(PCIE_PWR_MGMT_THRESH, val);
9916 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9917 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9919 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9921 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9922 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9925 if (tg3_flag(tp, L1PLLPD_EN)) {
9926 u32 grc_mode = tr32(GRC_MODE);
9928 /* Access the lower 1K of PL PCIE block registers. */
9929 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9930 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9932 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9933 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9934 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9936 tw32(GRC_MODE, grc_mode);
9939 if (tg3_flag(tp, 57765_CLASS)) {
9940 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9941 u32 grc_mode = tr32(GRC_MODE);
9943 /* Access the lower 1K of PL PCIE block registers. */
9944 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9945 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9947 val = tr32(TG3_PCIE_TLDLPL_PORT +
9948 TG3_PCIE_PL_LO_PHYCTL5);
9949 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9950 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9952 tw32(GRC_MODE, grc_mode);
9955 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9958 /* Fix transmit hangs */
9959 val = tr32(TG3_CPMU_PADRNG_CTL);
9960 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9961 tw32(TG3_CPMU_PADRNG_CTL, val);
9963 grc_mode = tr32(GRC_MODE);
9965 /* Access the lower 1K of DL PCIE block registers. */
9966 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9967 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9969 val = tr32(TG3_PCIE_TLDLPL_PORT +
9970 TG3_PCIE_DL_LO_FTSMAX);
9971 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9972 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9973 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9975 tw32(GRC_MODE, grc_mode);
9978 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9979 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9980 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9981 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9984 /* This works around an issue with Athlon chipsets on
9985 * B3 tigon3 silicon. This bit has no effect on any
9986 * other revision. But do not set this on PCI Express
9987 * chips and don't even touch the clocks if the CPMU is present.
9989 if (!tg3_flag(tp, CPMU_PRESENT)) {
9990 if (!tg3_flag(tp, PCI_EXPRESS))
9991 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9992 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9995 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9996 tg3_flag(tp, PCIX_MODE)) {
9997 val = tr32(TG3PCI_PCISTATE);
9998 val |= PCISTATE_RETRY_SAME_DMA;
9999 tw32(TG3PCI_PCISTATE, val);
10002 if (tg3_flag(tp, ENABLE_APE)) {
10003 /* Allow reads and writes to the
10004 * APE register and memory space.
10006 val = tr32(TG3PCI_PCISTATE);
10007 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10008 PCISTATE_ALLOW_APE_SHMEM_WR |
10009 PCISTATE_ALLOW_APE_PSPACE_WR;
10010 tw32(TG3PCI_PCISTATE, val);
10013 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10014 /* Enable some hw fixes. */
10015 val = tr32(TG3PCI_MSI_DATA);
10016 val |= (1 << 26) | (1 << 28) | (1 << 29);
10017 tw32(TG3PCI_MSI_DATA, val);
10020 /* Descriptor ring init may make accesses to the
10021 * NIC SRAM area to setup the TX descriptors, so we
10022 * can only do this after the hardware has been
10023 * successfully reset.
10025 err = tg3_init_rings(tp);
10029 if (tg3_flag(tp, 57765_PLUS)) {
10030 val = tr32(TG3PCI_DMA_RW_CTRL) &
10031 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10032 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10033 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10034 if (!tg3_flag(tp, 57765_CLASS) &&
10035 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10036 tg3_asic_rev(tp) != ASIC_REV_5762)
10037 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10038 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10039 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10040 tg3_asic_rev(tp) != ASIC_REV_5761) {
10041 /* This value is determined during the probe time DMA
10042 * engine test, tg3_test_dma.
10044 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10047 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10048 GRC_MODE_4X_NIC_SEND_RINGS |
10049 GRC_MODE_NO_TX_PHDR_CSUM |
10050 GRC_MODE_NO_RX_PHDR_CSUM);
10051 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
10059 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10061 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10063 tw32(TG3_RX_PTP_CTL,
10064 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10066 if (tg3_flag(tp, PTP_CAPABLE))
10067 val |= GRC_MODE_TIME_SYNC_ENABLE;
10069 tw32(GRC_MODE, tp->grc_mode | val);
	/* On one AMD platform, the MRRS is restricted to 4000 because of
	 * a south bridge limitation.  As a workaround, the driver sets the
	 * MRRS to 2048 instead of the default 4096.
	 */
10075 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10076 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10077 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10078 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
10082 val = tr32(GRC_MISC_CFG);
10084 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10085 tw32(GRC_MISC_CFG, val);
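	/* Worked example (illustrative, assuming the hardware divides by
	 * prescaler + 1): a value of 65 turns the fixed 66 MHz clock into
	 * a 66 MHz / (65 + 1) = 1 MHz timer tick, i.e. 1 usec resolution
	 * for the host coalescing tick counters.
	 */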
10087 /* Initialize MBUF/DESC pool. */
10088 if (tg3_flag(tp, 5750_PLUS)) {
10090 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10091 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10092 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10093 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10095 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10096 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10097 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10098 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10101 fw_len = tp->fw_len;
10102 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10103 tw32(BUFMGR_MB_POOL_ADDR,
10104 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10105 tw32(BUFMGR_MB_POOL_SIZE,
10106 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
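	/* Illustrative: the masking above rounds fw_len up to a 128-byte
	 * boundary (e.g. 0x1234 -> 0x1280), so the MBUF pool begins on an
	 * aligned address immediately past the on-chip firmware image.
	 */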
10109 if (tp->dev->mtu <= ETH_DATA_LEN) {
10110 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10111 tp->bufmgr_config.mbuf_read_dma_low_water);
10112 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10113 tp->bufmgr_config.mbuf_mac_rx_low_water);
10114 tw32(BUFMGR_MB_HIGH_WATER,
10115 tp->bufmgr_config.mbuf_high_water);
10117 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10118 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10119 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10120 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10121 tw32(BUFMGR_MB_HIGH_WATER,
10122 tp->bufmgr_config.mbuf_high_water_jumbo);
10124 tw32(BUFMGR_DMA_LOW_WATER,
10125 tp->bufmgr_config.dma_low_water);
10126 tw32(BUFMGR_DMA_HIGH_WATER,
10127 tp->bufmgr_config.dma_high_water);
10129 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10130 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10131 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10132 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10133 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10134 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10135 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10136 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10137 tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}
10148 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10149 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10151 tg3_setup_rxbd_thresholds(tp);
10153 /* Initialize TG3_BDINFO's at:
10154 * RCVDBDI_STD_BD: standard eth size rx ring
10155 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10156 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10159 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10160 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10161 * ring attribute flags
10162 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10164 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10165 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
10170 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10171 ((u64) tpr->rx_std_mapping >> 32));
10172 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10173 ((u64) tpr->rx_std_mapping & 0xffffffff));
10174 if (!tg3_flag(tp, 5717_PLUS))
10175 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10176 NIC_SRAM_RX_BUFFER_DESC);
10178 /* Disable the mini ring */
10179 if (!tg3_flag(tp, 5705_PLUS))
10180 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10181 BDINFO_FLAGS_DISABLED);
10183 /* Program the jumbo buffer descriptor ring control
10184 * blocks on those devices that have them.
10186 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10187 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10189 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10190 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10191 ((u64) tpr->rx_jmb_mapping >> 32));
10192 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10193 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10194 val = TG3_RX_JMB_RING_SIZE(tp) <<
10195 BDINFO_FLAGS_MAXLEN_SHIFT;
10196 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10197 val | BDINFO_FLAGS_USE_EXT_RECV);
10198 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10199 tg3_flag(tp, 57765_CLASS) ||
10200 tg3_asic_rev(tp) == ASIC_REV_5762)
10201 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10202 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10204 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10205 BDINFO_FLAGS_DISABLED);
10208 if (tg3_flag(tp, 57765_PLUS)) {
10209 val = TG3_RX_STD_RING_SIZE(tp);
10210 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10211 val |= (TG3_RX_STD_DMA_SZ << 2);
10213 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10215 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10217 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10219 tpr->rx_std_prod_idx = tp->rx_pending;
10220 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10222 tpr->rx_jmb_prod_idx =
10223 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10224 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10226 tg3_rings_reset(tp);
10228 /* Initialize MAC address and backoff seed. */
10229 __tg3_set_mac_addr(tp, false);
10231 /* MTU + ethernet header + FCS + optional VLAN tag */
10232 tw32(MAC_RX_MTU_SIZE,
10233 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10235 /* The slot time is changed by tg3_setup_phy if we
10236 * run at gigabit with half duplex.
10238 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10239 (6 << TX_LENGTHS_IPG_SHIFT) |
10240 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10242 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10243 tg3_asic_rev(tp) == ASIC_REV_5762)
10244 val |= tr32(MAC_TX_LENGTHS) &
10245 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10246 TX_LENGTHS_CNT_DWN_VAL_MSK);
10248 tw32(MAC_TX_LENGTHS, val);
10250 /* Receive rules. */
10251 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10252 tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate the RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
10257 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10258 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10259 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10260 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10261 RDMAC_MODE_LNGREAD_ENAB);
10263 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10264 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10266 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10267 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10268 tg3_asic_rev(tp) == ASIC_REV_57780)
10269 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10270 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10271 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10273 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10274 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10275 if (tg3_flag(tp, TSO_CAPABLE)) {
10276 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10277 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10278 !tg3_flag(tp, IS_5788)) {
10279 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10283 if (tg3_flag(tp, PCI_EXPRESS))
10284 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10286 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10288 if (tp->dev->mtu <= ETH_DATA_LEN) {
10289 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10290 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10294 if (tg3_flag(tp, HW_TSO_1) ||
10295 tg3_flag(tp, HW_TSO_2) ||
10296 tg3_flag(tp, HW_TSO_3))
10297 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10299 if (tg3_flag(tp, 57765_PLUS) ||
10300 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10301 tg3_asic_rev(tp) == ASIC_REV_57780)
10302 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10304 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10305 tg3_asic_rev(tp) == ASIC_REV_5762)
10306 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10308 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10309 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10310 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10311 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10312 tg3_flag(tp, 57765_PLUS)) {
10315 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10316 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10318 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10320 val = tr32(tgtreg);
10321 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10322 tg3_asic_rev(tp) == ASIC_REV_5762) {
10323 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10324 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10325 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10326 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10327 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10328 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10330 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10333 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10334 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10335 tg3_asic_rev(tp) == ASIC_REV_5762) {
10338 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10339 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10341 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10343 val = tr32(tgtreg);
10345 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10346 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10349 /* Receive/send statistics. */
10350 if (tg3_flag(tp, 5750_PLUS)) {
10351 val = tr32(RCVLPC_STATS_ENABLE);
10352 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10353 tw32(RCVLPC_STATS_ENABLE, val);
10354 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10355 tg3_flag(tp, TSO_CAPABLE)) {
10356 val = tr32(RCVLPC_STATS_ENABLE);
10357 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10358 tw32(RCVLPC_STATS_ENABLE, val);
10360 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10362 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10363 tw32(SNDDATAI_STATSENAB, 0xffffff);
10364 tw32(SNDDATAI_STATSCTRL,
10365 (SNDDATAI_SCTRL_ENABLE |
10366 SNDDATAI_SCTRL_FASTUPD));
10368 /* Setup host coalescing engine. */
10369 tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}
10376 __tg3_set_coalesce(tp, &tp->coal);
10378 if (!tg3_flag(tp, 5705_PLUS)) {
10379 /* Status/statistics block address. See tg3_timer,
10380 * the tg3_periodic_fetch_stats call there, and
10381 * tg3_get_stats to see how this works for 5705/5750 chips.
10383 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10384 ((u64) tp->stats_mapping >> 32));
10385 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10386 ((u64) tp->stats_mapping & 0xffffffff));
10387 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10389 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10391 /* Clear statistics and status block memory areas */
10392 for (i = NIC_SRAM_STATS_BLK;
10393 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10394 i += sizeof(u32)) {
10395 tg3_write_mem(tp, i, 0);
10400 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10402 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10403 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10404 if (!tg3_flag(tp, 5705_PLUS))
10405 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10407 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10408 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10409 /* reset to prevent losing 1st rx packet intermittently */
10410 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10414 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10415 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10416 MAC_MODE_FHDE_ENABLE;
10417 if (tg3_flag(tp, ENABLE_APE))
10418 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10419 if (!tg3_flag(tp, 5705_PLUS) &&
10420 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10421 tg3_asic_rev(tp) != ASIC_REV_5700)
10422 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10423 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs.  The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
10432 if (!tg3_flag(tp, IS_NIC)) {
10435 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10436 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10437 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10439 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10440 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10441 GRC_LCLCTRL_GPIO_OUTPUT3;
10443 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10444 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10446 tp->grc_local_ctrl &= ~gpio_mask;
10447 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10449 /* GPIO1 must be driven high for eeprom write protect */
10450 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10451 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10452 GRC_LCLCTRL_GPIO_OUTPUT1);
10454 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10457 if (tg3_flag(tp, USING_MSIX)) {
10458 val = tr32(MSGINT_MODE);
10459 val |= MSGINT_MODE_ENABLE;
10460 if (tp->irq_cnt > 1)
10461 val |= MSGINT_MODE_MULTIVEC_EN;
10462 if (!tg3_flag(tp, 1SHOT_MSI))
10463 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10464 tw32(MSGINT_MODE, val);
10467 if (!tg3_flag(tp, 5705_PLUS)) {
10468 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10472 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10473 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10474 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10475 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10476 WDMAC_MODE_LNGREAD_ENAB);
10478 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10479 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10480 if (tg3_flag(tp, TSO_CAPABLE) &&
10481 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10482 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10484 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10485 !tg3_flag(tp, IS_5788)) {
10486 val |= WDMAC_MODE_RX_ACCEL;
10490 /* Enable host coalescing bug fix */
10491 if (tg3_flag(tp, 5755_PLUS))
10492 val |= WDMAC_MODE_STATUS_TAG_FIX;
10494 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10495 val |= WDMAC_MODE_BURST_ALL_DATA;
10497 tw32_f(WDMAC_MODE, val);
10500 if (tg3_flag(tp, PCIX_MODE)) {
10503 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10505 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10506 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10507 pcix_cmd |= PCI_X_CMD_READ_2K;
10508 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10509 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10510 pcix_cmd |= PCI_X_CMD_READ_2K;
10512 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10516 tw32_f(RDMAC_MODE, rdmac_mode);
10519 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10520 tg3_asic_rev(tp) == ASIC_REV_5720) {
10521 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10522 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10525 if (i < TG3_NUM_RDMA_CHANNELS) {
10526 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10527 val |= tg3_lso_rd_dma_workaround_bit(tp);
10528 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10529 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10533 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10534 if (!tg3_flag(tp, 5705_PLUS))
10535 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10537 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10538 tw32(SNDDATAC_MODE,
10539 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10541 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10543 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10544 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10545 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10546 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10547 val |= RCVDBDI_MODE_LRG_RING_SZ;
10548 tw32(RCVDBDI_MODE, val);
10549 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10550 if (tg3_flag(tp, HW_TSO_1) ||
10551 tg3_flag(tp, HW_TSO_2) ||
10552 tg3_flag(tp, HW_TSO_3))
10553 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10554 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10555 if (tg3_flag(tp, ENABLE_TSS))
10556 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10557 tw32(SNDBDI_MODE, val);
10558 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10560 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10561 err = tg3_load_5701_a0_firmware_fix(tp);
10566 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10567 /* Ignore any errors for the firmware download. If download
10568 * fails, the device will operate with EEE disabled
10570 tg3_load_57766_firmware(tp);
10573 if (tg3_flag(tp, TSO_CAPABLE)) {
10574 err = tg3_load_tso_firmware(tp);
10579 tp->tx_mode = TX_MODE_ENABLE;
10581 if (tg3_flag(tp, 5755_PLUS) ||
10582 tg3_asic_rev(tp) == ASIC_REV_5906)
10583 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10585 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10586 tg3_asic_rev(tp) == ASIC_REV_5762) {
10587 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10588 tp->tx_mode &= ~val;
10589 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10592 tw32_f(MAC_TX_MODE, tp->tx_mode);
10595 if (tg3_flag(tp, ENABLE_RSS)) {
10598 tg3_rss_write_indir_tbl(tp);
10600 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10602 for (i = 0; i < 10 ; i++)
10603 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
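	/* Note: the 40-byte (10 x u32) RSS hash key is generated by the
	 * core (netdev_rss_key_fill) and written to the ten consecutive
	 * MAC_RSS_HASH_KEY_* registers at a 4-byte stride.
	 */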
10606 tp->rx_mode = RX_MODE_ENABLE;
10607 if (tg3_flag(tp, 5755_PLUS))
10608 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10610 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10611 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10613 if (tg3_flag(tp, ENABLE_RSS))
10614 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10615 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10616 RX_MODE_RSS_IPV6_HASH_EN |
10617 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10618 RX_MODE_RSS_IPV4_HASH_EN |
10619 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10621 tw32_f(MAC_RX_MODE, tp->rx_mode);
10624 tw32(MAC_LED_CTRL, tp->led_ctrl);
10626 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10627 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10628 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10631 tw32_f(MAC_RX_MODE, tp->rx_mode);
10634 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10635 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10636 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V, but only
			 * if the signal pre-emphasis bit is not set.
			 */
10639 val = tr32(MAC_SERDES_CFG);
10642 tw32(MAC_SERDES_CFG, val);
10644 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10645 tw32(MAC_SERDES_CFG, 0x616000);
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10657 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10658 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10659 /* Use hardware link auto-negotiation */
10660 tg3_flag_set(tp, HW_AUTONEG);
10663 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10664 tg3_asic_rev(tp) == ASIC_REV_5714) {
10667 tmp = tr32(SERDES_RX_CTRL);
10668 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10669 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10670 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10671 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10674 if (!tg3_flag(tp, USE_PHYLIB)) {
10675 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10676 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10678 err = tg3_setup_phy(tp, false);
10682 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10683 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10686 /* Clear CRC stats. */
10687 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10688 tg3_writephy(tp, MII_TG3_TEST1,
10689 tmp | MII_TG3_TEST1_CRC_EN);
10690 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10695 __tg3_set_rx_mode(tp->dev);
10697 /* Initialize receive rules. */
10698 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10699 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10700 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10701 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
		fallthrough;
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
		fallthrough;
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
		fallthrough;
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
		fallthrough;
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
		fallthrough;
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
		fallthrough;
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
		fallthrough;
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
		fallthrough;
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
		fallthrough;
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
		fallthrough;
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
		fallthrough;
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
		fallthrough;
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}
10757 if (tg3_flag(tp, ENABLE_APE))
10758 /* Write our heartbeat update interval to APE. */
10759 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10760 APE_HOST_HEARTBEAT_INT_5SEC);
10762 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10767 /* Called at device open time to get the chip ready for
10768 * packet processing. Invoked with tp->lock held.
10770 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10772 /* Chip may have been just powered on. If so, the boot code may still
10773 * be running initialization. Wait for it to finish to avoid races in
10774 * accessing the hardware.
10776 tg3_enable_register_access(tp);
10779 tg3_switch_clocks(tp);
10781 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10783 return tg3_reset_hw(tp, reset_phy);
10786 #ifdef CONFIG_TIGON3_HWMON
10787 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10789 u32 off, len = TG3_OCIR_LEN;
10792 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10793 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10795 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10796 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10797 memset(ocir, 0, len);
10801 /* sysfs attributes for hwmon */
10802 static ssize_t tg3_show_temp(struct device *dev,
10803 struct device_attribute *devattr, char *buf)
10805 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10806 struct tg3 *tp = dev_get_drvdata(dev);
10809 spin_lock_bh(&tp->lock);
10810 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10811 sizeof(temperature));
10812 spin_unlock_bh(&tp->lock);
10813 return sprintf(buf, "%u\n", temperature * 1000);
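/* Note: per the hwmon sysfs convention, temperatures are reported in
 * millidegrees Celsius -- hence the "* 1000" above, so an APE-reported
 * reading of 45 is surfaced to userspace as "45000".
 */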
10817 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10818 TG3_TEMP_SENSOR_OFFSET);
10819 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10820 TG3_TEMP_CAUTION_OFFSET);
10821 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10822 TG3_TEMP_MAX_OFFSET);
10824 static struct attribute *tg3_attrs[] = {
10825 &sensor_dev_attr_temp1_input.dev_attr.attr,
10826 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10827 &sensor_dev_attr_temp1_max.dev_attr.attr,
10830 ATTRIBUTE_GROUPS(tg3);
10832 static void tg3_hwmon_close(struct tg3 *tp)
10834 if (tp->hwmon_dev) {
10835 hwmon_device_unregister(tp->hwmon_dev);
10836 tp->hwmon_dev = NULL;
10840 static void tg3_hwmon_open(struct tg3 *tp)
10844 struct pci_dev *pdev = tp->pdev;
10845 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10847 tg3_sd_scan_scratchpad(tp, ocirs);
10849 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10850 if (!ocirs[i].src_data_length)
10853 size += ocirs[i].src_hdr_length;
10854 size += ocirs[i].src_data_length;
10860 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10862 if (IS_ERR(tp->hwmon_dev)) {
10863 tp->hwmon_dev = NULL;
10864 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10868 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10869 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10870 #endif /* CONFIG_TIGON3_HWMON */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
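/* Illustrative: the hardware counters are only 32 bits wide, so the macro
 * accumulates them into a 64-bit high/low pair and detects wrap-around
 * arithmetically.  E.g. if ->low is 0xffffff00 and __val is 0x200, the
 * sum wraps to 0x100; since 0x100 < 0x200, ->high is incremented to
 * carry the overflow.
 */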
10880 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10882 struct tg3_hw_stats *sp = tp->hw_stats;
10887 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10888 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10889 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10890 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10891 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10892 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10893 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10894 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10895 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10896 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10897 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10898 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10899 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10900 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10901 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10902 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10905 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10906 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10907 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10908 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10911 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10912 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10913 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10914 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10915 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10916 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10917 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10918 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10919 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10920 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10921 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10922 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10923 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10924 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10926 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10927 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10928 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10929 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10930 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10931 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10933 u32 val = tr32(HOSTCC_FLOW_ATTN);
10934 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10936 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10937 sp->rx_discards.low += val;
10938 if (sp->rx_discards.low < val)
10939 sp->rx_discards.high += 1;
10941 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10943 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10946 static void tg3_chk_missed_msi(struct tg3 *tp)
10950 for (i = 0; i < tp->irq_cnt; i++) {
10951 struct tg3_napi *tnapi = &tp->napi[i];
10953 if (tg3_has_work(tnapi)) {
10954 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10955 tnapi->last_tx_cons == tnapi->tx_cons) {
10956 if (tnapi->chk_msi_cnt < 1) {
10957 tnapi->chk_msi_cnt++;
10963 tnapi->chk_msi_cnt = 0;
10964 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10965 tnapi->last_tx_cons = tnapi->tx_cons;
10969 static void tg3_timer(struct timer_list *t)
10971 struct tg3 *tp = from_timer(tp, t, timer);
10973 spin_lock(&tp->lock);
10975 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10976 spin_unlock(&tp->lock);
10977 goto restart_timer;
10980 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10981 tg3_flag(tp, 57765_CLASS))
10982 tg3_chk_missed_msi(tp);
10984 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10985 /* BCM4785: Flush posted writes from GbE to host memory. */
10989 if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
10994 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10995 tw32(GRC_LOCAL_CTRL,
10996 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10998 tw32(HOSTCC_MODE, tp->coalesce_mode |
10999 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11002 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11003 spin_unlock(&tp->lock);
11004 tg3_reset_task_schedule(tp);
11005 goto restart_timer;
11009 /* This part only runs once per second. */
11010 if (!--tp->timer_counter) {
11011 if (tg3_flag(tp, 5705_PLUS))
11012 tg3_periodic_fetch_stats(tp);
11014 if (tp->setlpicnt && !--tp->setlpicnt)
11015 tg3_phy_eee_enable(tp);
11017 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11021 mac_stat = tr32(MAC_STATUS);
11024 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11025 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11027 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11031 tg3_setup_phy(tp, false);
11032 } else if (tg3_flag(tp, POLL_SERDES)) {
11033 u32 mac_stat = tr32(MAC_STATUS);
11034 int need_setup = 0;
11037 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11040 if (!tp->link_up &&
11041 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11042 MAC_STATUS_SIGNAL_DET))) {
11046 if (!tp->serdes_counter) {
11049 ~MAC_MODE_PORT_MODE_MASK));
11051 tw32_f(MAC_MODE, tp->mac_mode);
11054 tg3_setup_phy(tp, false);
11056 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11057 tg3_flag(tp, 5780_CLASS)) {
11058 tg3_serdes_parallel_detect(tp);
11059 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11060 u32 cpmu = tr32(TG3_CPMU_STATUS);
11061 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11062 TG3_CPMU_STATUS_LINK_MASK);
11064 if (link_up != tp->link_up)
11065 tg3_setup_phy(tp, false);
11068 tp->timer_counter = tp->timer_multiplier;
	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same effect.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
11088 if (!--tp->asf_counter) {
11089 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11090 tg3_wait_for_event_ack(tp);
11092 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11093 FWCMD_NICDRV_ALIVE3);
11094 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11095 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11096 TG3_FW_UPDATE_TIMEOUT_SEC);
11098 tg3_generate_fw_event(tp);
11100 tp->asf_counter = tp->asf_multiplier;
	/* Update the APE heartbeat every 5 seconds. */
11104 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11106 spin_unlock(&tp->lock);
11109 tp->timer.expires = jiffies + tp->timer_offset;
11110 add_timer(&tp->timer);
11113 static void tg3_timer_init(struct tg3 *tp)
11115 if (tg3_flag(tp, TAGGED_STATUS) &&
11116 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11117 !tg3_flag(tp, 57765_CLASS))
11118 tp->timer_offset = HZ;
11120 tp->timer_offset = HZ / 10;
11122 BUG_ON(tp->timer_offset > HZ);
11124 tp->timer_multiplier = (HZ / tp->timer_offset);
11125 tp->asf_multiplier = (HZ / tp->timer_offset) *
11126 TG3_FW_UPDATE_FREQ_SEC;
11128 timer_setup(&tp->timer, tg3_timer, 0);
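/* Worked example (illustrative): on a tagged-status chip timer_offset is
 * HZ, so tg3_timer() fires once per second and timer_multiplier == 1; on
 * the excepted chips it fires every HZ/10 jiffies with timer_multiplier
 * == 10, so the "once per second" block in tg3_timer() still runs at the
 * same rate.  asf_multiplier stretches that base period by
 * TG3_FW_UPDATE_FREQ_SEC for the ASF heartbeat.
 */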
11131 static void tg3_timer_start(struct tg3 *tp)
11133 tp->asf_counter = tp->asf_multiplier;
11134 tp->timer_counter = tp->timer_multiplier;
11136 tp->timer.expires = jiffies + tp->timer_offset;
11137 add_timer(&tp->timer);
11140 static void tg3_timer_stop(struct tg3 *tp)
11142 del_timer_sync(&tp->timer);
11145 /* Restart hardware after configuration changes, self-test, etc.
11146 * Invoked with tp->lock held.
11148 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11149 __releases(tp->lock)
11150 __acquires(tp->lock)
11154 err = tg3_init_hw(tp, reset_phy);
11156 netdev_err(tp->dev,
11157 "Failed to re-initialize device, aborting\n");
11158 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11159 tg3_full_unlock(tp);
11160 tg3_timer_stop(tp);
11162 tg3_napi_enable(tp);
11163 dev_close(tp->dev);
11164 tg3_full_lock(tp, 0);
11169 static void tg3_reset_task(struct work_struct *work)
11171 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11175 tg3_full_lock(tp, 0);
11177 if (!netif_running(tp->dev)) {
11178 tg3_flag_clear(tp, RESET_TASK_PENDING);
11179 tg3_full_unlock(tp);
11184 tg3_full_unlock(tp);
11188 tg3_netif_stop(tp);
11190 tg3_full_lock(tp, 1);
11192 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11193 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11194 tp->write32_rx_mbox = tg3_write_flush_reg32;
11195 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11196 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11199 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11200 err = tg3_init_hw(tp, true);
11202 tg3_full_unlock(tp);
11204 tg3_napi_enable(tp);
11205 /* Clear this flag so that tg3_reset_task_cancel() will not
11206 * call cancel_work_sync() and wait forever.
11208 tg3_flag_clear(tp, RESET_TASK_PENDING);
11209 dev_close(tp->dev);
11213 tg3_netif_start(tp);
11214 tg3_full_unlock(tp);
11216 tg3_flag_clear(tp, RESET_TASK_PENDING);
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
11232 if (tnapi->tx_buffers && tnapi->rx_rcb)
11233 snprintf(name, IFNAMSIZ,
11234 "%s-txrx-%d", tp->dev->name, irq_num);
11235 else if (tnapi->tx_buffers)
11236 snprintf(name, IFNAMSIZ,
11237 "%s-tx-%d", tp->dev->name, irq_num);
11238 else if (tnapi->rx_rcb)
11239 snprintf(name, IFNAMSIZ,
11240 "%s-rx-%d", tp->dev->name, irq_num);
11242 snprintf(name, IFNAMSIZ,
11243 "%s-%d", tp->dev->name, irq_num);
11244 name[IFNAMSIZ-1] = 0;
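	/* The resulting vector names look like "eth0-txrx-1", "eth0-rx-2"
	 * or "eth0-tx-1" in /proc/interrupts, making it easy to see which
	 * ring(s) each MSI-X vector services; a single-vector setup just
	 * uses the bare device name.
	 */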
	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}
11259 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11262 static int tg3_test_interrupt(struct tg3 *tp)
11264 struct tg3_napi *tnapi = &tp->napi[0];
11265 struct net_device *dev = tp->dev;
11266 int err, i, intr_ok = 0;
11269 if (!netif_running(dev))
11272 tg3_disable_ints(tp);
11274 free_irq(tnapi->irq_vec, tnapi);
11277 * Turn off MSI one shot mode. Otherwise this test has no
11278 * observable way to know whether the interrupt was delivered.
11280 if (tg3_flag(tp, 57765_PLUS)) {
11281 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11282 tw32(MSGINT_MODE, val);
11285 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11286 IRQF_SHARED, dev->name, tnapi);
11290 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11291 tg3_enable_ints(tp);
11293 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11296 for (i = 0; i < 5; i++) {
11297 u32 int_mbox, misc_host_ctrl;
11299 int_mbox = tr32_mailbox(tnapi->int_mbox);
11300 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}
11308 if (tg3_flag(tp, 57765_PLUS) &&
11309 tnapi->hw_status->status_tag != tnapi->last_tag)
11310 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11315 tg3_disable_ints(tp);
11317 free_irq(tnapi->irq_vec, tnapi);
11319 err = tg3_request_irq(tp, 0);
11325 /* Reenable MSI one shot mode. */
11326 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11327 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11328 tw32(MSGINT_MODE, val);
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 */
11339 static int tg3_test_msi(struct tg3 *tp)
11344 if (!tg3_flag(tp, USING_MSI))
11347 /* Turn off SERR reporting in case MSI terminates with Master
11350 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11351 pci_write_config_word(tp->pdev, PCI_COMMAND,
11352 pci_cmd & ~PCI_COMMAND_SERR);
11354 err = tg3_test_interrupt(tp);
11356 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11361 /* other failures */
11365 /* MSI test failed, go back to INTx mode */
11366 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11367 "to INTx mode. Please report this failure to the PCI "
11368 "maintainer and include system chipset information\n");
11370 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11372 pci_disable_msi(tp->pdev);
11374 tg3_flag_clear(tp, USING_MSI);
11375 tp->napi[0].irq_vec = tp->pdev->irq;
11377 err = tg3_request_irq(tp, 0);
11381 /* Need to reset the chip because the MSI cycle may have terminated
11382 * with Master Abort.
11384 tg3_full_lock(tp, 1);
11386 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11387 err = tg3_init_hw(tp, true);
11389 tg3_full_unlock(tp);
11392 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11397 static int tg3_request_firmware(struct tg3 *tp)
11399 const struct tg3_firmware_hdr *fw_hdr;
11401 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11402 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11407 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
11414 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11415 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11416 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11417 tp->fw_len, tp->fw_needed);
11418 release_firmware(tp->fw);
11423 /* We no longer need firmware; we have it. */
11424 tp->fw_needed = NULL;
11428 static u32 tg3_irq_count(struct tg3 *tp)
11430 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11433 /* We want as many rx rings enabled as there are cpus.
11434 * In multiqueue MSI-X mode, the first MSI-X vector
11435 * only deals with link interrupts, etc, so we add
11436 * one to the number of vectors we are requesting.
11438 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
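/* Example (illustrative): with rxq_cnt == 4 and txq_cnt == 1, irq_cnt
 * starts at max(4, 1) = 4 and one extra vector is added for the default
 * (link) vector, so 5 vectors are requested, capped at tp->irq_max.
 */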
11444 static bool tg3_enable_msix(struct tg3 *tp)
11447 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11449 tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt) {
		tp->rxq_cnt = netif_get_num_default_rss_queues();
		if (tp->rxq_cnt > tp->rxq_max)
			tp->rxq_cnt = tp->rxq_max;
	}
11456 /* Disable multiple TX rings by default. Simple round-robin hardware
11457 * scheduling of the TX rings can cause starvation of rings with
11458 * small packets when other rings have TSO or jumbo packets.
11463 tp->irq_cnt = tg3_irq_count(tp);
11465 for (i = 0; i < tp->irq_max; i++) {
11466 msix_ent[i].entry = i;
11467 msix_ent[i].vector = 0;
11470 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11473 } else if (rc < tp->irq_cnt) {
11474 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11477 tp->rxq_cnt = max(rc - 1, 1);
11479 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11482 for (i = 0; i < tp->irq_max; i++)
11483 tp->napi[i].irq_vec = msix_ent[i].vector;
11485 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11486 pci_disable_msix(tp->pdev);
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);
11495 if (tp->txq_cnt > 1)
11496 tg3_flag_set(tp, ENABLE_TSS);
11498 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11503 static void tg3_ints_init(struct tg3 *tp)
11505 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11506 !tg3_flag(tp, TAGGED_STATUS)) {
11507 /* All MSI supporting chips should support tagged
11508 * status. Assert that this is the case.
11510 netdev_warn(tp->dev,
11511 "MSI without TAGGED_STATUS? Not using MSI\n");
11515 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11516 tg3_flag_set(tp, USING_MSIX);
11517 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11518 tg3_flag_set(tp, USING_MSI);
11520 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11521 u32 msi_mode = tr32(MSGINT_MODE);
11522 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11523 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11524 if (!tg3_flag(tp, 1SHOT_MSI))
11525 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11526 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
11542 static void tg3_ints_fini(struct tg3 *tp)
11544 if (tg3_flag(tp, USING_MSIX))
11545 pci_disable_msix(tp->pdev);
11546 else if (tg3_flag(tp, USING_MSI))
11547 pci_disable_msi(tp->pdev);
11548 tg3_flag_clear(tp, USING_MSI);
11549 tg3_flag_clear(tp, USING_MSIX);
11550 tg3_flag_clear(tp, ENABLE_RSS);
11551 tg3_flag_clear(tp, ENABLE_TSS);
11554 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11557 struct net_device *dev = tp->dev;
11561 * Setup interrupts first so we know how
11562 * many NAPI resources to allocate
11566 tg3_rss_check_indir_tbl(tp);
11568 /* The placement of this call is tied
11569 * to the setup and use of Host TX descriptors.
11571 err = tg3_alloc_consistent(tp);
11573 goto out_ints_fini;
11577 tg3_napi_enable(tp);
11579 for (i = 0; i < tp->irq_cnt; i++) {
11580 err = tg3_request_irq(tp, i);
11582 for (i--; i >= 0; i--) {
11583 struct tg3_napi *tnapi = &tp->napi[i];
11585 free_irq(tnapi->irq_vec, tnapi);
11587 goto out_napi_fini;
11591 tg3_full_lock(tp, 0);
11594 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11596 err = tg3_init_hw(tp, reset_phy);
11598 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11599 tg3_free_rings(tp);
11602 tg3_full_unlock(tp);
11607 if (test_irq && tg3_flag(tp, USING_MSI)) {
11608 err = tg3_test_msi(tp);
11611 tg3_full_lock(tp, 0);
11612 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11613 tg3_free_rings(tp);
11614 tg3_full_unlock(tp);
11616 goto out_napi_fini;
11619 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11620 u32 val = tr32(PCIE_TRANSACTION_CFG);
11622 tw32(PCIE_TRANSACTION_CFG,
11623 val | PCIE_TRANS_CFG_1SHOT_MSI);
11629 tg3_hwmon_open(tp);
11631 tg3_full_lock(tp, 0);
11633 tg3_timer_start(tp);
11634 tg3_flag_set(tp, INIT_COMPLETE);
11635 tg3_enable_ints(tp);
11637 tg3_ptp_resume(tp);
11639 tg3_full_unlock(tp);
11641 netif_tx_start_all_queues(dev);
	/* Reset the loopback feature if it was turned on while the device
	 * was down; make sure it is installed properly now.
	 */
11647 if (dev->features & NETIF_F_LOOPBACK)
11648 tg3_set_loopback(dev, dev->features);
11653 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11654 struct tg3_napi *tnapi = &tp->napi[i];
11655 free_irq(tnapi->irq_vec, tnapi);
11659 tg3_napi_disable(tp);
11661 tg3_free_consistent(tp);
11669 static void tg3_stop(struct tg3 *tp)
11673 tg3_reset_task_cancel(tp);
11674 tg3_netif_stop(tp);
11676 tg3_timer_stop(tp);
11678 tg3_hwmon_close(tp);
11682 tg3_full_lock(tp, 1);
11684 tg3_disable_ints(tp);
11686 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11687 tg3_free_rings(tp);
11688 tg3_flag_clear(tp, INIT_COMPLETE);
11690 tg3_full_unlock(tp);
11692 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11693 struct tg3_napi *tnapi = &tp->napi[i];
11694 free_irq(tnapi->irq_vec, tnapi);
11701 tg3_free_consistent(tp);
11704 static int tg3_open(struct net_device *dev)
11706 struct tg3 *tp = netdev_priv(dev);
11709 if (tp->pcierr_recovery) {
11710 netdev_err(dev, "Failed to open device. PCI error recovery "
11715 if (tp->fw_needed) {
11716 err = tg3_request_firmware(tp);
11717 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11719 netdev_warn(tp->dev, "EEE capability disabled\n");
11720 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11721 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11722 netdev_warn(tp->dev, "EEE capability restored\n");
11723 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11725 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11729 netdev_warn(tp->dev, "TSO capability disabled\n");
11730 tg3_flag_clear(tp, TSO_CAPABLE);
11731 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11732 netdev_notice(tp->dev, "TSO capability restored\n");
11733 tg3_flag_set(tp, TSO_CAPABLE);
11737 tg3_carrier_off(tp);
11739 err = tg3_power_up(tp);
11743 tg3_full_lock(tp, 0);
11745 tg3_disable_ints(tp);
11746 tg3_flag_clear(tp, INIT_COMPLETE);
11748 tg3_full_unlock(tp);
11750 err = tg3_start(tp,
11751 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11754 tg3_frob_aux_power(tp, false);
11755 pci_set_power_state(tp->pdev, PCI_D3hot);
11761 static int tg3_close(struct net_device *dev)
11763 struct tg3 *tp = netdev_priv(dev);
11765 if (tp->pcierr_recovery) {
11766 netdev_err(dev, "Failed to close device. PCI error recovery "
11773 if (pci_device_is_present(tp->pdev)) {
11774 tg3_power_down_prepare(tp);
11776 tg3_carrier_off(tp);
11781 static inline u64 get_stat64(tg3_stat64_t *val)
11783 return ((u64)val->high << 32) | ((u64)val->low);
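/* Illustration of get_stat64(): the MAC exports every counter as a
 * high/low pair of 32-bit words, so for example high = 0x00000001 and
 * low = 0x00000002 combine into the 64-bit value 0x0000000100000002:
 *
 *	u64 v = ((u64)0x00000001 << 32) | (u64)0x00000002;
 */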
11786 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11788 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11790 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11791 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11792 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11795 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11796 tg3_writephy(tp, MII_TG3_TEST1,
11797 val | MII_TG3_TEST1_CRC_EN);
11798 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11802 tp->phy_crc_errors += val;
11804 return tp->phy_crc_errors;
11807 return get_stat64(&hw_stats->rx_fcs_errors);
11810 #define ESTAT_ADD(member) \
11811 estats->member = old_estats->member + \
11812 get_stat64(&hw_stats->member)
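/* ESTAT_ADD() folds the live hardware counter into the snapshot saved
 * before the last chip reset (tp->estats_prev), keeping the totals
 * monotonic across resets. For example, ESTAT_ADD(rx_octets) expands
 * to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 */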
11814 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11816 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11817 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11819 ESTAT_ADD(rx_octets);
11820 ESTAT_ADD(rx_fragments);
11821 ESTAT_ADD(rx_ucast_packets);
11822 ESTAT_ADD(rx_mcast_packets);
11823 ESTAT_ADD(rx_bcast_packets);
11824 ESTAT_ADD(rx_fcs_errors);
11825 ESTAT_ADD(rx_align_errors);
11826 ESTAT_ADD(rx_xon_pause_rcvd);
11827 ESTAT_ADD(rx_xoff_pause_rcvd);
11828 ESTAT_ADD(rx_mac_ctrl_rcvd);
11829 ESTAT_ADD(rx_xoff_entered);
11830 ESTAT_ADD(rx_frame_too_long_errors);
11831 ESTAT_ADD(rx_jabbers);
11832 ESTAT_ADD(rx_undersize_packets);
11833 ESTAT_ADD(rx_in_length_errors);
11834 ESTAT_ADD(rx_out_length_errors);
11835 ESTAT_ADD(rx_64_or_less_octet_packets);
11836 ESTAT_ADD(rx_65_to_127_octet_packets);
11837 ESTAT_ADD(rx_128_to_255_octet_packets);
11838 ESTAT_ADD(rx_256_to_511_octet_packets);
11839 ESTAT_ADD(rx_512_to_1023_octet_packets);
11840 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11841 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11842 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11843 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11844 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11846 ESTAT_ADD(tx_octets);
11847 ESTAT_ADD(tx_collisions);
11848 ESTAT_ADD(tx_xon_sent);
11849 ESTAT_ADD(tx_xoff_sent);
11850 ESTAT_ADD(tx_flow_control);
11851 ESTAT_ADD(tx_mac_errors);
11852 ESTAT_ADD(tx_single_collisions);
11853 ESTAT_ADD(tx_mult_collisions);
11854 ESTAT_ADD(tx_deferred);
11855 ESTAT_ADD(tx_excessive_collisions);
11856 ESTAT_ADD(tx_late_collisions);
11857 ESTAT_ADD(tx_collide_2times);
11858 ESTAT_ADD(tx_collide_3times);
11859 ESTAT_ADD(tx_collide_4times);
11860 ESTAT_ADD(tx_collide_5times);
11861 ESTAT_ADD(tx_collide_6times);
11862 ESTAT_ADD(tx_collide_7times);
11863 ESTAT_ADD(tx_collide_8times);
11864 ESTAT_ADD(tx_collide_9times);
11865 ESTAT_ADD(tx_collide_10times);
11866 ESTAT_ADD(tx_collide_11times);
11867 ESTAT_ADD(tx_collide_12times);
11868 ESTAT_ADD(tx_collide_13times);
11869 ESTAT_ADD(tx_collide_14times);
11870 ESTAT_ADD(tx_collide_15times);
11871 ESTAT_ADD(tx_ucast_packets);
11872 ESTAT_ADD(tx_mcast_packets);
11873 ESTAT_ADD(tx_bcast_packets);
11874 ESTAT_ADD(tx_carrier_sense_errors);
11875 ESTAT_ADD(tx_discards);
11876 ESTAT_ADD(tx_errors);
11878 ESTAT_ADD(dma_writeq_full);
11879 ESTAT_ADD(dma_write_prioq_full);
11880 ESTAT_ADD(rxbds_empty);
11881 ESTAT_ADD(rx_discards);
11882 ESTAT_ADD(rx_errors);
11883 ESTAT_ADD(rx_threshold_hit);
11885 ESTAT_ADD(dma_readq_full);
11886 ESTAT_ADD(dma_read_prioq_full);
11887 ESTAT_ADD(tx_comp_queue_full);
11889 ESTAT_ADD(ring_set_send_prod_index);
11890 ESTAT_ADD(ring_status_update);
11891 ESTAT_ADD(nic_irqs);
11892 ESTAT_ADD(nic_avoided_irqs);
11893 ESTAT_ADD(nic_tx_threshold_hit);
11895 ESTAT_ADD(mbuf_lwm_thresh_hit);
11898 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11900 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11901 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11903 stats->rx_packets = old_stats->rx_packets +
11904 get_stat64(&hw_stats->rx_ucast_packets) +
11905 get_stat64(&hw_stats->rx_mcast_packets) +
11906 get_stat64(&hw_stats->rx_bcast_packets);
11908 stats->tx_packets = old_stats->tx_packets +
11909 get_stat64(&hw_stats->tx_ucast_packets) +
11910 get_stat64(&hw_stats->tx_mcast_packets) +
11911 get_stat64(&hw_stats->tx_bcast_packets);
11913 stats->rx_bytes = old_stats->rx_bytes +
11914 get_stat64(&hw_stats->rx_octets);
11915 stats->tx_bytes = old_stats->tx_bytes +
11916 get_stat64(&hw_stats->tx_octets);
11918 stats->rx_errors = old_stats->rx_errors +
11919 get_stat64(&hw_stats->rx_errors);
11920 stats->tx_errors = old_stats->tx_errors +
11921 get_stat64(&hw_stats->tx_errors) +
11922 get_stat64(&hw_stats->tx_mac_errors) +
11923 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11924 get_stat64(&hw_stats->tx_discards);
11926 stats->multicast = old_stats->multicast +
11927 get_stat64(&hw_stats->rx_mcast_packets);
11928 stats->collisions = old_stats->collisions +
11929 get_stat64(&hw_stats->tx_collisions);
11931 stats->rx_length_errors = old_stats->rx_length_errors +
11932 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11933 get_stat64(&hw_stats->rx_undersize_packets);
11935 stats->rx_frame_errors = old_stats->rx_frame_errors +
11936 get_stat64(&hw_stats->rx_align_errors);
11937 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11938 get_stat64(&hw_stats->tx_discards);
11939 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11940 get_stat64(&hw_stats->tx_carrier_sense_errors);
11942 stats->rx_crc_errors = old_stats->rx_crc_errors +
11943 tg3_calc_crc_errors(tp);
11945 stats->rx_missed_errors = old_stats->rx_missed_errors +
11946 get_stat64(&hw_stats->rx_discards);
11948 stats->rx_dropped = tp->rx_dropped;
11949 stats->tx_dropped = tp->tx_dropped;
11952 static int tg3_get_regs_len(struct net_device *dev)
11954 return TG3_REG_BLK_SIZE;
11957 static void tg3_get_regs(struct net_device *dev,
11958 struct ethtool_regs *regs, void *_p)
11960 struct tg3 *tp = netdev_priv(dev);
11964 memset(_p, 0, TG3_REG_BLK_SIZE);
11966 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11969 tg3_full_lock(tp, 0);
11971 tg3_dump_legacy_regs(tp, (u32 *)_p);
11973 tg3_full_unlock(tp);
11976 static int tg3_get_eeprom_len(struct net_device *dev)
11978 struct tg3 *tp = netdev_priv(dev);
11980 return tp->nvram_size;
11983 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11985 struct tg3 *tp = netdev_priv(dev);
11986 int ret, cpmu_restore = 0;
11988 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11991 if (tg3_flag(tp, NO_NVRAM))
11994 offset = eeprom->offset;
11998 eeprom->magic = TG3_EEPROM_MAGIC;
12000 /* Override clock, link aware and link idle modes */
12001 if (tg3_flag(tp, CPMU_PRESENT)) {
12002 cpmu_val = tr32(TG3_CPMU_CTRL);
12003 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12004 CPMU_CTRL_LINK_IDLE_MODE)) {
12005 tw32(TG3_CPMU_CTRL, cpmu_val &
12006 ~(CPMU_CTRL_LINK_AWARE_MODE |
12007 CPMU_CTRL_LINK_IDLE_MODE));
12011 tg3_override_clk(tp);
12014 /* adjustments to start on required 4 byte boundary */
12015 b_offset = offset & 3;
12016 b_count = 4 - b_offset;
12017 if (b_count > len) {
12018 /* i.e. offset=1 len=2 */
12021 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12024 memcpy(data, ((char *)&val) + b_offset, b_count);
12027 eeprom->len += b_count;
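/* Worked example of the fixup above (illustrative values): offset = 5,
 * len = 10 gives b_offset = 1 and b_count = 3, so the word at NVRAM
 * address 4 is read and its last three bytes (addresses 5..7) are
 * copied out first. The loop below then fetches addresses 8..11 as one
 * aligned word, and the tail code picks up the remaining bytes 12..14.
 */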
12030 /* read bytes up to the last 4 byte boundary */
12031 pd = &data[eeprom->len];
12032 for (i = 0; i < (len - (len & 3)); i += 4) {
12033 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12040 memcpy(pd + i, &val, 4);
12041 if (need_resched()) {
12042 if (signal_pending(current)) {
12053 /* read last bytes not ending on 4 byte boundary */
12054 pd = &data[eeprom->len];
12056 b_offset = offset + len - b_count;
12057 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12060 memcpy(pd, &val, b_count);
12061 eeprom->len += b_count;
12066 /* Restore clock, link aware and link idle modes */
12067 tg3_restore_clk(tp);
12069 tw32(TG3_CPMU_CTRL, cpmu_val);
12074 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12076 struct tg3 *tp = netdev_priv(dev);
12078 u32 offset, len, b_offset, odd_len;
12080 __be32 start = 0, end;
12082 if (tg3_flag(tp, NO_NVRAM) ||
12083 eeprom->magic != TG3_EEPROM_MAGIC)
12086 offset = eeprom->offset;
12089 if ((b_offset = (offset & 3))) {
12090 /* adjustments to start on required 4 byte boundary */
12091 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12102 /* adjustments to end on required 4 byte boundary */
12104 len = (len + 3) & ~3;
12105 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12111 if (b_offset || odd_len) {
12112 buf = kmalloc(len, GFP_KERNEL);
12116 memcpy(buf, &start, 4);
12118 memcpy(buf+len-4, &end, 4);
12119 memcpy(buf + b_offset, data, eeprom->len);
12122 ret = tg3_nvram_write_block(tp, offset, len, buf);
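/* The sequence above is a read-modify-write: when the caller's range is
 * unaligned at either end, the bordering NVRAM words are read into
 * start/end, merged around the user data in a scratch buffer, and the
 * whole buffer is written back as full words.
 */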
12130 static int tg3_get_link_ksettings(struct net_device *dev,
12131 struct ethtool_link_ksettings *cmd)
12133 struct tg3 *tp = netdev_priv(dev);
12134 u32 supported, advertising;
12136 if (tg3_flag(tp, USE_PHYLIB)) {
12137 struct phy_device *phydev;
12138 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12140 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12141 phy_ethtool_ksettings_get(phydev, cmd);
12146 supported = (SUPPORTED_Autoneg);
12148 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12149 supported |= (SUPPORTED_1000baseT_Half |
12150 SUPPORTED_1000baseT_Full);
12152 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12153 supported |= (SUPPORTED_100baseT_Half |
12154 SUPPORTED_100baseT_Full |
12155 SUPPORTED_10baseT_Half |
12156 SUPPORTED_10baseT_Full |
12158 cmd->base.port = PORT_TP;
12160 supported |= SUPPORTED_FIBRE;
12161 cmd->base.port = PORT_FIBRE;
12163 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12166 advertising = tp->link_config.advertising;
12167 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12168 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12169 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12170 advertising |= ADVERTISED_Pause;
12172 advertising |= ADVERTISED_Pause |
12173 ADVERTISED_Asym_Pause;
12175 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12176 advertising |= ADVERTISED_Asym_Pause;
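/* The mapping above follows the usual IEEE 802.3 pause advertisement
 * encoding: RX+TX resolves to Pause, RX only to Pause | Asym_Pause,
 * and TX only to Asym_Pause.
 */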
12179 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12182 if (netif_running(dev) && tp->link_up) {
12183 cmd->base.speed = tp->link_config.active_speed;
12184 cmd->base.duplex = tp->link_config.active_duplex;
12185 ethtool_convert_legacy_u32_to_link_mode(
12186 cmd->link_modes.lp_advertising,
12187 tp->link_config.rmt_adv);
12189 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12190 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12191 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12193 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12196 cmd->base.speed = SPEED_UNKNOWN;
12197 cmd->base.duplex = DUPLEX_UNKNOWN;
12198 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12200 cmd->base.phy_address = tp->phy_addr;
12201 cmd->base.autoneg = tp->link_config.autoneg;
12205 static int tg3_set_link_ksettings(struct net_device *dev,
12206 const struct ethtool_link_ksettings *cmd)
12208 struct tg3 *tp = netdev_priv(dev);
12209 u32 speed = cmd->base.speed;
12212 if (tg3_flag(tp, USE_PHYLIB)) {
12213 struct phy_device *phydev;
12214 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12216 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12217 return phy_ethtool_ksettings_set(phydev, cmd);
12220 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12221 cmd->base.autoneg != AUTONEG_DISABLE)
12224 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12225 cmd->base.duplex != DUPLEX_FULL &&
12226 cmd->base.duplex != DUPLEX_HALF)
12229 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12230 cmd->link_modes.advertising);
12232 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12233 u32 mask = ADVERTISED_Autoneg |
12235 ADVERTISED_Asym_Pause;
12237 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12238 mask |= ADVERTISED_1000baseT_Half |
12239 ADVERTISED_1000baseT_Full;
12241 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12242 mask |= ADVERTISED_100baseT_Half |
12243 ADVERTISED_100baseT_Full |
12244 ADVERTISED_10baseT_Half |
12245 ADVERTISED_10baseT_Full |
12248 mask |= ADVERTISED_FIBRE;
12250 if (advertising & ~mask)
12253 mask &= (ADVERTISED_1000baseT_Half |
12254 ADVERTISED_1000baseT_Full |
12255 ADVERTISED_100baseT_Half |
12256 ADVERTISED_100baseT_Full |
12257 ADVERTISED_10baseT_Half |
12258 ADVERTISED_10baseT_Full);
12260 advertising &= mask;
12262 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12263 if (speed != SPEED_1000)
12266 if (cmd->base.duplex != DUPLEX_FULL)
12269 if (speed != SPEED_100 &&
12275 tg3_full_lock(tp, 0);
12277 tp->link_config.autoneg = cmd->base.autoneg;
12278 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12279 tp->link_config.advertising = (advertising |
12280 ADVERTISED_Autoneg);
12281 tp->link_config.speed = SPEED_UNKNOWN;
12282 tp->link_config.duplex = DUPLEX_UNKNOWN;
12284 tp->link_config.advertising = 0;
12285 tp->link_config.speed = speed;
12286 tp->link_config.duplex = cmd->base.duplex;
12289 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12291 tg3_warn_mgmt_link_flap(tp);
12293 if (netif_running(dev))
12294 tg3_setup_phy(tp, true);
12296 tg3_full_unlock(tp);
12301 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12303 struct tg3 *tp = netdev_priv(dev);
12305 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12306 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12307 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12310 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12312 struct tg3 *tp = netdev_priv(dev);
12314 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12315 wol->supported = WAKE_MAGIC;
12317 wol->supported = 0;
12319 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12320 wol->wolopts = WAKE_MAGIC;
12321 memset(&wol->sopass, 0, sizeof(wol->sopass));
12324 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12326 struct tg3 *tp = netdev_priv(dev);
12327 struct device *dp = &tp->pdev->dev;
12329 if (wol->wolopts & ~WAKE_MAGIC)
12331 if ((wol->wolopts & WAKE_MAGIC) &&
12332 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12335 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12337 if (device_may_wakeup(dp))
12338 tg3_flag_set(tp, WOL_ENABLE);
12340 tg3_flag_clear(tp, WOL_ENABLE);
12345 static u32 tg3_get_msglevel(struct net_device *dev)
12347 struct tg3 *tp = netdev_priv(dev);
12348 return tp->msg_enable;
12351 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12353 struct tg3 *tp = netdev_priv(dev);
12354 tp->msg_enable = value;
12357 static int tg3_nway_reset(struct net_device *dev)
12359 struct tg3 *tp = netdev_priv(dev);
12362 if (!netif_running(dev))
12365 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12368 tg3_warn_mgmt_link_flap(tp);
12370 if (tg3_flag(tp, USE_PHYLIB)) {
12371 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12373 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12377 spin_lock_bh(&tp->lock);
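/* The BMCR is read twice below; the first, discarded read appears to be
 * intentional (presumably to flush a stale latched value before the
 * autoneg-enable test), so it is kept as is.
 */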
12379 tg3_readphy(tp, MII_BMCR, &bmcr);
12380 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12381 ((bmcr & BMCR_ANENABLE) ||
12382 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12383 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12387 spin_unlock_bh(&tp->lock);
12393 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12395 struct tg3 *tp = netdev_priv(dev);
12397 ering->rx_max_pending = tp->rx_std_ring_mask;
12398 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12399 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12401 ering->rx_jumbo_max_pending = 0;
12403 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12405 ering->rx_pending = tp->rx_pending;
12406 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12407 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12409 ering->rx_jumbo_pending = 0;
12411 ering->tx_pending = tp->napi[0].tx_pending;
12414 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12416 struct tg3 *tp = netdev_priv(dev);
12417 int i, irq_sync = 0, err = 0;
12418 bool reset_phy = false;
12420 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12421 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12422 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12423 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12424 (tg3_flag(tp, TSO_BUG) &&
12425 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12428 if (netif_running(dev)) {
12430 tg3_netif_stop(tp);
12434 tg3_full_lock(tp, irq_sync);
12436 tp->rx_pending = ering->rx_pending;
12438 if (tg3_flag(tp, MAX_RXPEND_64) &&
12439 tp->rx_pending > 63)
12440 tp->rx_pending = 63;
12442 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12443 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12445 for (i = 0; i < tp->irq_max; i++)
12446 tp->napi[i].tx_pending = ering->tx_pending;
12448 if (netif_running(dev)) {
12449 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12450 /* Reset PHY to avoid PHY lock up */
12451 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12452 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12453 tg3_asic_rev(tp) == ASIC_REV_5720)
12456 err = tg3_restart_hw(tp, reset_phy);
12458 tg3_netif_start(tp);
12461 tg3_full_unlock(tp);
12463 if (irq_sync && !err)
12469 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12471 struct tg3 *tp = netdev_priv(dev);
12473 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12475 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12476 epause->rx_pause = 1;
12478 epause->rx_pause = 0;
12480 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12481 epause->tx_pause = 1;
12483 epause->tx_pause = 0;
12486 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12488 struct tg3 *tp = netdev_priv(dev);
12490 bool reset_phy = false;
12492 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12493 tg3_warn_mgmt_link_flap(tp);
12495 if (tg3_flag(tp, USE_PHYLIB)) {
12496 struct phy_device *phydev;
12498 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12500 if (!phy_validate_pause(phydev, epause))
12503 tp->link_config.flowctrl = 0;
12504 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12505 if (epause->rx_pause) {
12506 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12508 if (epause->tx_pause) {
12509 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12511 } else if (epause->tx_pause) {
12512 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12515 if (epause->autoneg)
12516 tg3_flag_set(tp, PAUSE_AUTONEG);
12518 tg3_flag_clear(tp, PAUSE_AUTONEG);
12520 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12521 if (phydev->autoneg) {
12522 /* phy_set_asym_pause() will
12523 * renegotiate the link to inform our
12524 * link partner of our flow control
12525 * settings, even if the flow control
12526 * is forced. Let tg3_adjust_link()
12527 * do the final flow control setup.
12528 */
12532 if (!epause->autoneg)
12533 tg3_setup_flow_control(tp, 0, 0);
12538 if (netif_running(dev)) {
12539 tg3_netif_stop(tp);
12543 tg3_full_lock(tp, irq_sync);
12545 if (epause->autoneg)
12546 tg3_flag_set(tp, PAUSE_AUTONEG);
12548 tg3_flag_clear(tp, PAUSE_AUTONEG);
12549 if (epause->rx_pause)
12550 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12552 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12553 if (epause->tx_pause)
12554 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12556 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12558 if (netif_running(dev)) {
12559 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12560 /* Reset PHY to avoid PHY lock up */
12561 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12562 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12563 tg3_asic_rev(tp) == ASIC_REV_5720)
12566 err = tg3_restart_hw(tp, reset_phy);
12568 tg3_netif_start(tp);
12571 tg3_full_unlock(tp);
12574 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12579 static int tg3_get_sset_count(struct net_device *dev, int sset)
12583 return TG3_NUM_TEST;
12585 return TG3_NUM_STATS;
12587 return -EOPNOTSUPP;
12591 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12592 u32 *rules __always_unused)
12594 struct tg3 *tp = netdev_priv(dev);
12596 if (!tg3_flag(tp, SUPPORT_MSIX))
12597 return -EOPNOTSUPP;
12599 switch (info->cmd) {
12600 case ETHTOOL_GRXRINGS:
12601 if (netif_running(tp->dev))
12602 info->data = tp->rxq_cnt;
12604 info->data = num_online_cpus();
12605 if (info->data > TG3_RSS_MAX_NUM_QS)
12606 info->data = TG3_RSS_MAX_NUM_QS;
12612 return -EOPNOTSUPP;
12616 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12619 struct tg3 *tp = netdev_priv(dev);
12621 if (tg3_flag(tp, SUPPORT_MSIX))
12622 size = TG3_RSS_INDIR_TBL_SIZE;
12627 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12629 struct tg3 *tp = netdev_priv(dev);
12633 *hfunc = ETH_RSS_HASH_TOP;
12637 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12638 indir[i] = tp->rss_ind_tbl[i];
12643 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12646 struct tg3 *tp = netdev_priv(dev);
12649 /* We require at least one supported parameter to be changed and no
12650 * change in any of the unsupported parameters.
12651 */
12652 if (key ||
12653 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12654 return -EOPNOTSUPP;
12659 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12660 tp->rss_ind_tbl[i] = indir[i];
12662 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12665 /* It is legal to write the indirection
12666 * table while the device is running.
12667 */
12668 tg3_full_lock(tp, 0);
12669 tg3_rss_write_indir_tbl(tp);
12670 tg3_full_unlock(tp);
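/* Conceptually, the RSS indirection table maps the low bits of the
 * receive packet hash to an rx queue, i.e. roughly
 * queue = rss_ind_tbl[hash % TG3_RSS_INDIR_TBL_SIZE]; each lookup is
 * independent, which is why rewriting the table while traffic flows is
 * safe here.
 */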
12675 static void tg3_get_channels(struct net_device *dev,
12676 struct ethtool_channels *channel)
12678 struct tg3 *tp = netdev_priv(dev);
12679 u32 deflt_qs = netif_get_num_default_rss_queues();
12681 channel->max_rx = tp->rxq_max;
12682 channel->max_tx = tp->txq_max;
12684 if (netif_running(dev)) {
12685 channel->rx_count = tp->rxq_cnt;
12686 channel->tx_count = tp->txq_cnt;
12689 channel->rx_count = tp->rxq_req;
12691 channel->rx_count = min(deflt_qs, tp->rxq_max);
12694 channel->tx_count = tp->txq_req;
12696 channel->tx_count = min(deflt_qs, tp->txq_max);
12700 static int tg3_set_channels(struct net_device *dev,
12701 struct ethtool_channels *channel)
12703 struct tg3 *tp = netdev_priv(dev);
12705 if (!tg3_flag(tp, SUPPORT_MSIX))
12706 return -EOPNOTSUPP;
12708 if (channel->rx_count > tp->rxq_max ||
12709 channel->tx_count > tp->txq_max)
12712 tp->rxq_req = channel->rx_count;
12713 tp->txq_req = channel->tx_count;
12715 if (!netif_running(dev))
12720 tg3_carrier_off(tp);
12722 tg3_start(tp, true, false, false);
12727 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12729 switch (stringset) {
12731 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12734 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12737 WARN_ON(1); /* we need a WARN() */
12742 static int tg3_set_phys_id(struct net_device *dev,
12743 enum ethtool_phys_id_state state)
12745 struct tg3 *tp = netdev_priv(dev);
12748 case ETHTOOL_ID_ACTIVE:
12749 return 1; /* cycle on/off once per second */
12751 case ETHTOOL_ID_ON:
12752 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12753 LED_CTRL_1000MBPS_ON |
12754 LED_CTRL_100MBPS_ON |
12755 LED_CTRL_10MBPS_ON |
12756 LED_CTRL_TRAFFIC_OVERRIDE |
12757 LED_CTRL_TRAFFIC_BLINK |
12758 LED_CTRL_TRAFFIC_LED);
12761 case ETHTOOL_ID_OFF:
12762 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12763 LED_CTRL_TRAFFIC_OVERRIDE);
12766 case ETHTOOL_ID_INACTIVE:
12767 tw32(MAC_LED_CTRL, tp->led_ctrl);
12774 static void tg3_get_ethtool_stats(struct net_device *dev,
12775 struct ethtool_stats *estats, u64 *tmp_stats)
12777 struct tg3 *tp = netdev_priv(dev);
12780 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12782 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12785 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12789 u32 offset = 0, len = 0;
12792 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12795 if (magic == TG3_EEPROM_MAGIC) {
12796 for (offset = TG3_NVM_DIR_START;
12797 offset < TG3_NVM_DIR_END;
12798 offset += TG3_NVM_DIRENT_SIZE) {
12799 if (tg3_nvram_read(tp, offset, &val))
12802 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12803 TG3_NVM_DIRTYPE_EXTVPD)
12807 if (offset != TG3_NVM_DIR_END) {
12808 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12809 if (tg3_nvram_read(tp, offset + 4, &offset))
12812 offset = tg3_nvram_logical_addr(tp, offset);
12815 if (!offset || !len) {
12816 offset = TG3_NVM_VPD_OFF;
12817 len = TG3_NVM_VPD_LEN;
12820 buf = kmalloc(len, GFP_KERNEL);
12824 for (i = 0; i < len; i += 4) {
12825 /* The data is in little-endian format in NVRAM.
12826 * Use the big-endian read routines to preserve
12827 * the byte order as it exists in NVRAM.
12828 */
12829 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12834 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12846 #define NVRAM_TEST_SIZE 0x100
12847 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12848 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12849 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12850 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12851 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12852 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12853 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12854 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12856 static int tg3_test_nvram(struct tg3 *tp)
12860 int i, j, k, err = 0, size;
12863 if (tg3_flag(tp, NO_NVRAM))
12866 if (tg3_nvram_read(tp, 0, &magic) != 0)
12869 if (magic == TG3_EEPROM_MAGIC)
12870 size = NVRAM_TEST_SIZE;
12871 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12872 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12873 TG3_EEPROM_SB_FORMAT_1) {
12874 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12875 case TG3_EEPROM_SB_REVISION_0:
12876 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12878 case TG3_EEPROM_SB_REVISION_2:
12879 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12881 case TG3_EEPROM_SB_REVISION_3:
12882 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12884 case TG3_EEPROM_SB_REVISION_4:
12885 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12887 case TG3_EEPROM_SB_REVISION_5:
12888 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12890 case TG3_EEPROM_SB_REVISION_6:
12891 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12898 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12899 size = NVRAM_SELFBOOT_HW_SIZE;
12903 buf = kmalloc(size, GFP_KERNEL);
12908 for (i = 0, j = 0; i < size; i += 4, j++) {
12909 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12916 /* Selfboot format */
12917 magic = be32_to_cpu(buf[0]);
12918 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12919 TG3_EEPROM_MAGIC_FW) {
12920 u8 *buf8 = (u8 *) buf, csum8 = 0;
12922 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12923 TG3_EEPROM_SB_REVISION_2) {
12924 /* For rev 2, the csum doesn't include the MBA. */
12925 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12927 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12930 for (i = 0; i < size; i++)
12943 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12944 TG3_EEPROM_MAGIC_HW) {
12945 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12946 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12947 u8 *buf8 = (u8 *) buf;
12949 /* Separate the parity bits and the data bytes. */
12950 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12951 if ((i == 0) || (i == 8)) {
12955 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12956 parity[k++] = buf8[i] & msk;
12958 } else if (i == 16) {
12962 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12963 parity[k++] = buf8[i] & msk;
12966 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12967 parity[k++] = buf8[i] & msk;
12970 data[j++] = buf8[i];
12974 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12975 u8 hw8 = hweight8(data[i]);
12977 if ((hw8 & 0x1) && parity[i])
12979 else if (!(hw8 & 0x1) && !parity[i])
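/* The loop above enforces odd parity per byte (inferred from the code,
 * not a documented format): a data byte with an odd number of set bits
 * must carry a clear parity bit and an even one a set parity bit, so
 * data plus parity always holds an odd bit count.
 */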
12988 /* Bootstrap checksum at offset 0x10 */
12989 csum = calc_crc((unsigned char *) buf, 0x10);
12990 if (csum != le32_to_cpu(buf[0x10/4]))
12993 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12994 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12995 if (csum != le32_to_cpu(buf[0xfc/4]))
13000 buf = tg3_vpd_readblock(tp, &len);
13004 err = pci_vpd_check_csum(buf, len);
13005 /* go on if no checksum found */
13013 #define TG3_SERDES_TIMEOUT_SEC 2
13014 #define TG3_COPPER_TIMEOUT_SEC 6
13016 static int tg3_test_link(struct tg3 *tp)
13020 if (!netif_running(tp->dev))
13023 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13024 max = TG3_SERDES_TIMEOUT_SEC;
13026 max = TG3_COPPER_TIMEOUT_SEC;
13028 for (i = 0; i < max; i++) {
13032 if (msleep_interruptible(1000))
13039 /* Only test the commonly used registers */
13040 static int tg3_test_registers(struct tg3 *tp)
13042 int i, is_5705, is_5750;
13043 u32 offset, read_mask, write_mask, val, save_val, read_val;
13047 #define TG3_FL_5705 0x1
13048 #define TG3_FL_NOT_5705 0x2
13049 #define TG3_FL_NOT_5788 0x4
13050 #define TG3_FL_NOT_5750 0x8
13054 /* MAC Control Registers */
13055 { MAC_MODE, TG3_FL_NOT_5705,
13056 0x00000000, 0x00ef6f8c },
13057 { MAC_MODE, TG3_FL_5705,
13058 0x00000000, 0x01ef6b8c },
13059 { MAC_STATUS, TG3_FL_NOT_5705,
13060 0x03800107, 0x00000000 },
13061 { MAC_STATUS, TG3_FL_5705,
13062 0x03800100, 0x00000000 },
13063 { MAC_ADDR_0_HIGH, 0x0000,
13064 0x00000000, 0x0000ffff },
13065 { MAC_ADDR_0_LOW, 0x0000,
13066 0x00000000, 0xffffffff },
13067 { MAC_RX_MTU_SIZE, 0x0000,
13068 0x00000000, 0x0000ffff },
13069 { MAC_TX_MODE, 0x0000,
13070 0x00000000, 0x00000070 },
13071 { MAC_TX_LENGTHS, 0x0000,
13072 0x00000000, 0x00003fff },
13073 { MAC_RX_MODE, TG3_FL_NOT_5705,
13074 0x00000000, 0x000007fc },
13075 { MAC_RX_MODE, TG3_FL_5705,
13076 0x00000000, 0x000007dc },
13077 { MAC_HASH_REG_0, 0x0000,
13078 0x00000000, 0xffffffff },
13079 { MAC_HASH_REG_1, 0x0000,
13080 0x00000000, 0xffffffff },
13081 { MAC_HASH_REG_2, 0x0000,
13082 0x00000000, 0xffffffff },
13083 { MAC_HASH_REG_3, 0x0000,
13084 0x00000000, 0xffffffff },
13086 /* Receive Data and Receive BD Initiator Control Registers. */
13087 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13088 0x00000000, 0xffffffff },
13089 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13090 0x00000000, 0xffffffff },
13091 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13092 0x00000000, 0x00000003 },
13093 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13094 0x00000000, 0xffffffff },
13095 { RCVDBDI_STD_BD+0, 0x0000,
13096 0x00000000, 0xffffffff },
13097 { RCVDBDI_STD_BD+4, 0x0000,
13098 0x00000000, 0xffffffff },
13099 { RCVDBDI_STD_BD+8, 0x0000,
13100 0x00000000, 0xffff0002 },
13101 { RCVDBDI_STD_BD+0xc, 0x0000,
13102 0x00000000, 0xffffffff },
13104 /* Receive BD Initiator Control Registers. */
13105 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13106 0x00000000, 0xffffffff },
13107 { RCVBDI_STD_THRESH, TG3_FL_5705,
13108 0x00000000, 0x000003ff },
13109 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13110 0x00000000, 0xffffffff },
13112 /* Host Coalescing Control Registers. */
13113 { HOSTCC_MODE, TG3_FL_NOT_5705,
13114 0x00000000, 0x00000004 },
13115 { HOSTCC_MODE, TG3_FL_5705,
13116 0x00000000, 0x000000f6 },
13117 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13118 0x00000000, 0xffffffff },
13119 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13120 0x00000000, 0x000003ff },
13121 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13122 0x00000000, 0xffffffff },
13123 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13124 0x00000000, 0x000003ff },
13125 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13126 0x00000000, 0xffffffff },
13127 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13128 0x00000000, 0x000000ff },
13129 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13130 0x00000000, 0xffffffff },
13131 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13132 0x00000000, 0x000000ff },
13133 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13134 0x00000000, 0xffffffff },
13135 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13136 0x00000000, 0xffffffff },
13137 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13138 0x00000000, 0xffffffff },
13139 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13140 0x00000000, 0x000000ff },
13141 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13142 0x00000000, 0xffffffff },
13143 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13144 0x00000000, 0x000000ff },
13145 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13146 0x00000000, 0xffffffff },
13147 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13148 0x00000000, 0xffffffff },
13149 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13150 0x00000000, 0xffffffff },
13151 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13152 0x00000000, 0xffffffff },
13153 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13154 0x00000000, 0xffffffff },
13155 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13156 0xffffffff, 0x00000000 },
13157 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13158 0xffffffff, 0x00000000 },
13160 /* Buffer Manager Control Registers. */
13161 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13162 0x00000000, 0x007fff80 },
13163 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13164 0x00000000, 0x007fffff },
13165 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13166 0x00000000, 0x0000003f },
13167 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13168 0x00000000, 0x000001ff },
13169 { BUFMGR_MB_HIGH_WATER, 0x0000,
13170 0x00000000, 0x000001ff },
13171 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13172 0xffffffff, 0x00000000 },
13173 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13174 0xffffffff, 0x00000000 },
13176 /* Mailbox Registers */
13177 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13178 0x00000000, 0x000001ff },
13179 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13180 0x00000000, 0x000001ff },
13181 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13182 0x00000000, 0x000007ff },
13183 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13184 0x00000000, 0x000001ff },
13186 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13189 is_5705 = is_5750 = 0;
13190 if (tg3_flag(tp, 5705_PLUS)) {
13192 if (tg3_flag(tp, 5750_PLUS))
13196 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13197 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13200 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13203 if (tg3_flag(tp, IS_5788) &&
13204 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13207 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13210 offset = (u32) reg_tbl[i].offset;
13211 read_mask = reg_tbl[i].read_mask;
13212 write_mask = reg_tbl[i].write_mask;
13214 /* Save the original register content */
13215 save_val = tr32(offset);
13217 /* Determine the read-only value. */
13218 read_val = save_val & read_mask;
13220 /* Write zero to the register, then make sure the read-only bits
13221 * are not changed and the read/write bits are all zeros.
13222 */
13223 tw32(offset, 0);
13225 val = tr32(offset);
13227 /* Test the read-only and read/write bits. */
13228 if (((val & read_mask) != read_val) || (val & write_mask))
13231 /* Write ones to all the bits defined by RdMask and WrMask, then
13232 * make sure the read-only bits are not changed and the
13233 * read/write bits are all ones.
13234 */
13235 tw32(offset, read_mask | write_mask);
13237 val = tr32(offset);
13239 /* Test the read-only bits. */
13240 if ((val & read_mask) != read_val)
13243 /* Test the read/write bits. */
13244 if ((val & write_mask) != write_mask)
13247 tw32(offset, save_val);
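/* Worked example of the mask scheme: for the entry { MAC_MODE,
 * TG3_FL_5705, 0x00000000, 0x01ef6b8c } the read_mask is 0, so no bits
 * are expected to be read-only; writing 0 must read back 0 in the
 * 0x01ef6b8c bits, and writing 0x01ef6b8c must read back exactly those
 * bits.
 */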
13253 if (netif_msg_hw(tp))
13254 netdev_err(tp->dev,
13255 "Register test failed at offset %x\n", offset);
13256 tw32(offset, save_val);
13260 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13262 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
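/* The classic memory-test trio: all zeroes, all ones, and the
 * alternating pattern 0xaa55a55a, which together catch stuck-at bits
 * and many shorted adjacent lines (a quick heuristic, not an
 * exhaustive RAM test).
 */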
13266 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13267 for (j = 0; j < len; j += 4) {
13270 tg3_write_mem(tp, offset + j, test_pattern[i]);
13271 tg3_read_mem(tp, offset + j, &val);
13272 if (val != test_pattern[i])
13279 static int tg3_test_memory(struct tg3 *tp)
13281 static struct mem_entry {
13284 } mem_tbl_570x[] = {
13285 { 0x00000000, 0x00b50},
13286 { 0x00002000, 0x1c000},
13287 { 0xffffffff, 0x00000}
13288 }, mem_tbl_5705[] = {
13289 { 0x00000100, 0x0000c},
13290 { 0x00000200, 0x00008},
13291 { 0x00004000, 0x00800},
13292 { 0x00006000, 0x01000},
13293 { 0x00008000, 0x02000},
13294 { 0x00010000, 0x0e000},
13295 { 0xffffffff, 0x00000}
13296 }, mem_tbl_5755[] = {
13297 { 0x00000200, 0x00008},
13298 { 0x00004000, 0x00800},
13299 { 0x00006000, 0x00800},
13300 { 0x00008000, 0x02000},
13301 { 0x00010000, 0x0c000},
13302 { 0xffffffff, 0x00000}
13303 }, mem_tbl_5906[] = {
13304 { 0x00000200, 0x00008},
13305 { 0x00004000, 0x00400},
13306 { 0x00006000, 0x00400},
13307 { 0x00008000, 0x01000},
13308 { 0x00010000, 0x01000},
13309 { 0xffffffff, 0x00000}
13310 }, mem_tbl_5717[] = {
13311 { 0x00000200, 0x00008},
13312 { 0x00010000, 0x0a000},
13313 { 0x00020000, 0x13c00},
13314 { 0xffffffff, 0x00000}
13315 }, mem_tbl_57765[] = {
13316 { 0x00000200, 0x00008},
13317 { 0x00004000, 0x00800},
13318 { 0x00006000, 0x09800},
13319 { 0x00010000, 0x0a000},
13320 { 0xffffffff, 0x00000}
13322 struct mem_entry *mem_tbl;
13326 if (tg3_flag(tp, 5717_PLUS))
13327 mem_tbl = mem_tbl_5717;
13328 else if (tg3_flag(tp, 57765_CLASS) ||
13329 tg3_asic_rev(tp) == ASIC_REV_5762)
13330 mem_tbl = mem_tbl_57765;
13331 else if (tg3_flag(tp, 5755_PLUS))
13332 mem_tbl = mem_tbl_5755;
13333 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13334 mem_tbl = mem_tbl_5906;
13335 else if (tg3_flag(tp, 5705_PLUS))
13336 mem_tbl = mem_tbl_5705;
13338 mem_tbl = mem_tbl_570x;
13340 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13341 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13349 #define TG3_TSO_MSS 500
13351 #define TG3_TSO_IP_HDR_LEN 20
13352 #define TG3_TSO_TCP_HDR_LEN 20
13353 #define TG3_TSO_TCP_OPT_LEN 12
13355 static const u8 tg3_tso_header[] = {
13357 0x45, 0x00, 0x00, 0x00,
13358 0x00, 0x00, 0x40, 0x00,
13359 0x40, 0x06, 0x00, 0x00,
13360 0x0a, 0x00, 0x00, 0x01,
13361 0x0a, 0x00, 0x00, 0x02,
13362 0x0d, 0x00, 0xe0, 0x00,
13363 0x00, 0x00, 0x01, 0x00,
13364 0x00, 0x00, 0x02, 0x00,
13365 0x80, 0x10, 0x10, 0x00,
13366 0x14, 0x09, 0x00, 0x00,
13367 0x01, 0x01, 0x08, 0x0a,
13368 0x11, 0x11, 0x11, 0x11,
13369 0x11, 0x11, 0x11, 0x11,
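/* Decoding the canned header above: a 20-byte IPv4 header (version/IHL
 * 0x45, DF set, TTL 0x40, protocol 0x06 = TCP, 10.0.0.1 -> 10.0.0.2)
 * followed by a 20-byte TCP header and a 12-byte timestamp option,
 * matching the TG3_TSO_*_LEN constants above.
 */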
13372 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13374 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13375 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13377 struct sk_buff *skb;
13378 u8 *tx_data, *rx_data;
13380 int num_pkts, tx_len, rx_len, i, err;
13381 struct tg3_rx_buffer_desc *desc;
13382 struct tg3_napi *tnapi, *rnapi;
13383 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13385 tnapi = &tp->napi[0];
13386 rnapi = &tp->napi[0];
13387 if (tp->irq_cnt > 1) {
13388 if (tg3_flag(tp, ENABLE_RSS))
13389 rnapi = &tp->napi[1];
13390 if (tg3_flag(tp, ENABLE_TSS))
13391 tnapi = &tp->napi[1];
13393 coal_now = tnapi->coal_now | rnapi->coal_now;
13398 skb = netdev_alloc_skb(tp->dev, tx_len);
13402 tx_data = skb_put(skb, tx_len);
13403 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13404 memset(tx_data + ETH_ALEN, 0x0, 8);
13406 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13408 if (tso_loopback) {
13409 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13411 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13412 TG3_TSO_TCP_OPT_LEN;
13414 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13415 sizeof(tg3_tso_header));
13418 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13419 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13421 /* Set the total length field in the IP header */
13422 iph->tot_len = htons((u16)(mss + hdr_len));
13424 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13425 TXD_FLAG_CPU_POST_DMA);
13427 if (tg3_flag(tp, HW_TSO_1) ||
13428 tg3_flag(tp, HW_TSO_2) ||
13429 tg3_flag(tp, HW_TSO_3)) {
13431 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13432 th = (struct tcphdr *)&tx_data[val];
13435 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13437 if (tg3_flag(tp, HW_TSO_3)) {
13438 mss |= (hdr_len & 0xc) << 12;
13439 if (hdr_len & 0x10)
13440 base_flags |= 0x00000010;
13441 base_flags |= (hdr_len & 0x3e0) << 5;
13442 } else if (tg3_flag(tp, HW_TSO_2))
13443 mss |= hdr_len << 9;
13444 else if (tg3_flag(tp, HW_TSO_1) ||
13445 tg3_asic_rev(tp) == ASIC_REV_5705) {
13446 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13448 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13451 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13454 data_off = ETH_HLEN;
13456 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13457 tx_len > VLAN_ETH_FRAME_LEN)
13458 base_flags |= TXD_FLAG_JMB_PKT;
13461 for (i = data_off; i < tx_len; i++)
13462 tx_data[i] = (u8) (i & 0xff);
13464 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13465 if (dma_mapping_error(&tp->pdev->dev, map)) {
13466 dev_kfree_skb(skb);
13470 val = tnapi->tx_prod;
13471 tnapi->tx_buffers[val].skb = skb;
13472 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13474 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13479 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13481 budget = tg3_tx_avail(tnapi);
13482 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13483 base_flags | TXD_FLAG_END, mss, 0)) {
13484 tnapi->tx_buffers[val].skb = NULL;
13485 dev_kfree_skb(skb);
13491 /* Sync BD data before updating mailbox */
13494 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13495 tr32_mailbox(tnapi->prodmbox);
13499 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13500 for (i = 0; i < 35; i++) {
13501 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13506 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13507 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13508 if ((tx_idx == tnapi->tx_prod) &&
13509 (rx_idx == (rx_start_idx + num_pkts)))
13513 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13514 dev_kfree_skb(skb);
13516 if (tx_idx != tnapi->tx_prod)
13519 if (rx_idx != rx_start_idx + num_pkts)
13523 while (rx_idx != rx_start_idx) {
13524 desc = &rnapi->rx_rcb[rx_start_idx++];
13525 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13526 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13528 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13529 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13532 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13535 if (!tso_loopback) {
13536 if (rx_len != tx_len)
13539 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13540 if (opaque_key != RXD_OPAQUE_RING_STD)
13543 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13546 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13547 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13548 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13552 if (opaque_key == RXD_OPAQUE_RING_STD) {
13553 rx_data = tpr->rx_std_buffers[desc_idx].data;
13554 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13556 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13557 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13558 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13563 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13566 rx_data += TG3_RX_OFFSET(tp);
13567 for (i = data_off; i < rx_len; i++, val++) {
13568 if (*(rx_data + i) != (u8) (val & 0xff))
13575 /* tg3_free_rings will unmap and free the rx_data */
13580 #define TG3_STD_LOOPBACK_FAILED 1
13581 #define TG3_JMB_LOOPBACK_FAILED 2
13582 #define TG3_TSO_LOOPBACK_FAILED 4
13583 #define TG3_LOOPBACK_FAILED \
13584 (TG3_STD_LOOPBACK_FAILED | \
13585 TG3_JMB_LOOPBACK_FAILED | \
13586 TG3_TSO_LOOPBACK_FAILED)
13588 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13592 u32 jmb_pkt_sz = 9000;
13595 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13597 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13598 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13600 if (!netif_running(tp->dev)) {
13601 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13602 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13604 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13608 err = tg3_reset_hw(tp, true);
13610 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13611 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13613 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13617 if (tg3_flag(tp, ENABLE_RSS)) {
13620 /* Reroute all rx packets to the 1st queue */
13621 for (i = MAC_RSS_INDIR_TBL_0;
13622 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13626 /* HW errata - mac loopback fails in some cases on 5780.
13627 * Normal traffic and PHY loopback are not affected by
13628 * errata. Also, the MAC loopback test is deprecated for
13629 * all newer ASIC revisions.
13630 */
13631 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13632 !tg3_flag(tp, CPMU_PRESENT)) {
13633 tg3_mac_loopback(tp, true);
13635 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13636 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13638 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13639 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13640 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13642 tg3_mac_loopback(tp, false);
13645 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13646 !tg3_flag(tp, USE_PHYLIB)) {
13649 tg3_phy_lpbk_set(tp, 0, false);
13651 /* Wait for link */
13652 for (i = 0; i < 100; i++) {
13653 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13658 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13659 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13660 if (tg3_flag(tp, TSO_CAPABLE) &&
13661 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13662 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13663 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13664 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13665 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13668 tg3_phy_lpbk_set(tp, 0, true);
13670 /* All link indications report up, but the hardware
13671 * isn't really ready for about 20 msec. Double it to be sure.
13672 */
13676 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13677 data[TG3_EXT_LOOPB_TEST] |=
13678 TG3_STD_LOOPBACK_FAILED;
13679 if (tg3_flag(tp, TSO_CAPABLE) &&
13680 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13681 data[TG3_EXT_LOOPB_TEST] |=
13682 TG3_TSO_LOOPBACK_FAILED;
13683 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13684 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13685 data[TG3_EXT_LOOPB_TEST] |=
13686 TG3_JMB_LOOPBACK_FAILED;
13689 /* Re-enable gphy autopowerdown. */
13690 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13691 tg3_phy_toggle_apd(tp, true);
13694 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13695 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13698 tp->phy_flags |= eee_cap;
13703 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13706 struct tg3 *tp = netdev_priv(dev);
13707 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13709 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13710 if (tg3_power_up(tp)) {
13711 etest->flags |= ETH_TEST_FL_FAILED;
13712 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13715 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13718 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13720 if (tg3_test_nvram(tp) != 0) {
13721 etest->flags |= ETH_TEST_FL_FAILED;
13722 data[TG3_NVRAM_TEST] = 1;
13724 if (!doextlpbk && tg3_test_link(tp)) {
13725 etest->flags |= ETH_TEST_FL_FAILED;
13726 data[TG3_LINK_TEST] = 1;
13728 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13729 int err, err2 = 0, irq_sync = 0;
13731 if (netif_running(dev)) {
13733 tg3_netif_stop(tp);
13737 tg3_full_lock(tp, irq_sync);
13738 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13739 err = tg3_nvram_lock(tp);
13740 tg3_halt_cpu(tp, RX_CPU_BASE);
13741 if (!tg3_flag(tp, 5705_PLUS))
13742 tg3_halt_cpu(tp, TX_CPU_BASE);
13744 tg3_nvram_unlock(tp);
13746 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13749 if (tg3_test_registers(tp) != 0) {
13750 etest->flags |= ETH_TEST_FL_FAILED;
13751 data[TG3_REGISTER_TEST] = 1;
13754 if (tg3_test_memory(tp) != 0) {
13755 etest->flags |= ETH_TEST_FL_FAILED;
13756 data[TG3_MEMORY_TEST] = 1;
13760 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13762 if (tg3_test_loopback(tp, data, doextlpbk))
13763 etest->flags |= ETH_TEST_FL_FAILED;
13765 tg3_full_unlock(tp);
13767 if (tg3_test_interrupt(tp) != 0) {
13768 etest->flags |= ETH_TEST_FL_FAILED;
13769 data[TG3_INTERRUPT_TEST] = 1;
13772 tg3_full_lock(tp, 0);
13774 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13775 if (netif_running(dev)) {
13776 tg3_flag_set(tp, INIT_COMPLETE);
13777 err2 = tg3_restart_hw(tp, true);
13779 tg3_netif_start(tp);
13782 tg3_full_unlock(tp);
13784 if (irq_sync && !err2)
13787 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13788 tg3_power_down_prepare(tp);
13792 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13794 struct tg3 *tp = netdev_priv(dev);
13795 struct hwtstamp_config stmpconf;
13797 if (!tg3_flag(tp, PTP_CAPABLE))
13798 return -EOPNOTSUPP;
13800 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13803 if (stmpconf.flags)
13806 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13807 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13810 switch (stmpconf.rx_filter) {
13811 case HWTSTAMP_FILTER_NONE:
13814 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13815 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13816 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13818 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13819 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13820 TG3_RX_PTP_CTL_SYNC_EVNT;
13822 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13823 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13824 TG3_RX_PTP_CTL_DELAY_REQ;
13826 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13827 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13828 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13830 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13831 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13832 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13834 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13835 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13836 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13838 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13839 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13840 TG3_RX_PTP_CTL_SYNC_EVNT;
13842 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13843 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13844 TG3_RX_PTP_CTL_SYNC_EVNT;
13846 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13847 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13848 TG3_RX_PTP_CTL_SYNC_EVNT;
13850 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13851 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13852 TG3_RX_PTP_CTL_DELAY_REQ;
13854 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13855 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13856 TG3_RX_PTP_CTL_DELAY_REQ;
13858 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13859 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13860 TG3_RX_PTP_CTL_DELAY_REQ;
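/* Each filter above programs a distinct rxptpctl value; the mapping is
 * one-to-one, which is what lets tg3_hwtstamp_get() below recover the
 * configured filter with a plain switch on tp->rxptpctl.
 */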
13866 if (netif_running(dev) && tp->rxptpctl)
13867 tw32(TG3_RX_PTP_CTL,
13868 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13870 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13871 tg3_flag_set(tp, TX_TSTAMP_EN);
13873 tg3_flag_clear(tp, TX_TSTAMP_EN);
13875 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13879 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13881 struct tg3 *tp = netdev_priv(dev);
13882 struct hwtstamp_config stmpconf;
13884 if (!tg3_flag(tp, PTP_CAPABLE))
13885 return -EOPNOTSUPP;
13887 stmpconf.flags = 0;
13888 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13889 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13891 switch (tp->rxptpctl) {
13893 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13895 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13896 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13898 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13899 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13901 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13902 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13904 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13905 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13907 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13908 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13910 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13911 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13913 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13914 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13916 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13917 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13919 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13920 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13922 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13923 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13925 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13926 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13928 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13929 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13936 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13940 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13942 struct mii_ioctl_data *data = if_mii(ifr);
13943 struct tg3 *tp = netdev_priv(dev);
13946 if (tg3_flag(tp, USE_PHYLIB)) {
13947 struct phy_device *phydev;
13948 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13950 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13951 return phy_mii_ioctl(phydev, ifr, cmd);
13956 data->phy_id = tp->phy_addr;
13959 case SIOCGMIIREG: {
13962 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13963 break; /* We have no PHY */
13965 if (!netif_running(dev))
13968 spin_lock_bh(&tp->lock);
13969 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13970 data->reg_num & 0x1f, &mii_regval);
13971 spin_unlock_bh(&tp->lock);
13973 data->val_out = mii_regval;
13979 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13980 break; /* We have no PHY */
13982 if (!netif_running(dev))
13985 spin_lock_bh(&tp->lock);
13986 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13987 data->reg_num & 0x1f, data->val_in);
13988 spin_unlock_bh(&tp->lock);
13992 case SIOCSHWTSTAMP:
13993 return tg3_hwtstamp_set(dev, ifr);
13995 case SIOCGHWTSTAMP:
13996 return tg3_hwtstamp_get(dev, ifr);
14002 return -EOPNOTSUPP;
14005 static int tg3_get_coalesce(struct net_device *dev,
14006 struct ethtool_coalesce *ec,
14007 struct kernel_ethtool_coalesce *kernel_coal,
14008 struct netlink_ext_ack *extack)
14010 struct tg3 *tp = netdev_priv(dev);
14012 memcpy(ec, &tp->coal, sizeof(*ec));
14016 static int tg3_set_coalesce(struct net_device *dev,
14017 struct ethtool_coalesce *ec,
14018 struct kernel_ethtool_coalesce *kernel_coal,
14019 struct netlink_ext_ack *extack)
14021 struct tg3 *tp = netdev_priv(dev);
14022 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14023 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14025 if (!tg3_flag(tp, 5705_PLUS)) {
14026 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14027 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14028 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14029 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14032 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14033 (!ec->rx_coalesce_usecs) ||
14034 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14035 (!ec->tx_coalesce_usecs) ||
14036 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14037 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14038 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14039 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14040 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14041 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14042 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14043 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14046 /* Only copy relevant parameters, ignore all others. */
14047 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14048 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14049 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14050 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14051 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14052 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14053 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14054 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14055 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14057 if (netif_running(dev)) {
14058 tg3_full_lock(tp, 0);
14059 __tg3_set_coalesce(tp, &tp->coal);
14060 tg3_full_unlock(tp);
14061 }
14063 return 0;
14064 }
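/* Illustrative sketch (not part of the driver): driving the bounds checks in
 * tg3_set_coalesce() above through the classic ethtool ioctl. Get-modify-set
 * keeps the other parameters intact; the interface name and the value passed
 * in are assumptions made up for the example.
 */
#if 0	/* example only */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

static int set_rx_coalesce_usecs(const char *ifname, __u32 usecs)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;
	int err = -1, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	memset(&ec, 0, sizeof(ec));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ec;

	ec.cmd = ETHTOOL_GCOALESCE;		/* read current settings */
	if (!ioctl(fd, SIOCETHTOOL, &ifr)) {
		ec.cmd = ETHTOOL_SCOALESCE;
		ec.rx_coalesce_usecs = usecs;	/* 0 or > MAX_RXCOL_TICKS => -EINVAL */
		err = ioctl(fd, SIOCETHTOOL, &ifr);
	}
	close(fd);
	return err;
}
#endif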
14065 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14067 struct tg3 *tp = netdev_priv(dev);
14069 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14070 netdev_warn(tp->dev, "Board does not support EEE!\n");
14071 return -EOPNOTSUPP;
14074 if (edata->advertised != tp->eee.advertised) {
14075 netdev_warn(tp->dev,
14076 "Direct manipulation of EEE advertisement is not supported\n");
14080 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14081 netdev_warn(tp->dev,
14082 "Maximal Tx Lpi timer supported is %#x(u)\n",
14083 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14084 return -EINVAL;
14085 }
14087 tp->eee = *edata;
14089 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14090 tg3_warn_mgmt_link_flap(tp);
14092 if (netif_running(tp->dev)) {
14093 tg3_full_lock(tp, 0);
14094 tg3_setup_eee(tp);
14095 tg3_phy_reset(tp);
14096 tg3_full_unlock(tp);
14102 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14104 struct tg3 *tp = netdev_priv(dev);
14106 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14107 netdev_warn(tp->dev,
14108 "Board does not support EEE!\n");
14109 return -EOPNOTSUPP;
14110 }
14112 *edata = tp->eee;
14114 return 0;
14115 }
14116 static const struct ethtool_ops tg3_ethtool_ops = {
14117 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14118 ETHTOOL_COALESCE_MAX_FRAMES |
14119 ETHTOOL_COALESCE_USECS_IRQ |
14120 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14121 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14122 .get_drvinfo = tg3_get_drvinfo,
14123 .get_regs_len = tg3_get_regs_len,
14124 .get_regs = tg3_get_regs,
14125 .get_wol = tg3_get_wol,
14126 .set_wol = tg3_set_wol,
14127 .get_msglevel = tg3_get_msglevel,
14128 .set_msglevel = tg3_set_msglevel,
14129 .nway_reset = tg3_nway_reset,
14130 .get_link = ethtool_op_get_link,
14131 .get_eeprom_len = tg3_get_eeprom_len,
14132 .get_eeprom = tg3_get_eeprom,
14133 .set_eeprom = tg3_set_eeprom,
14134 .get_ringparam = tg3_get_ringparam,
14135 .set_ringparam = tg3_set_ringparam,
14136 .get_pauseparam = tg3_get_pauseparam,
14137 .set_pauseparam = tg3_set_pauseparam,
14138 .self_test = tg3_self_test,
14139 .get_strings = tg3_get_strings,
14140 .set_phys_id = tg3_set_phys_id,
14141 .get_ethtool_stats = tg3_get_ethtool_stats,
14142 .get_coalesce = tg3_get_coalesce,
14143 .set_coalesce = tg3_set_coalesce,
14144 .get_sset_count = tg3_get_sset_count,
14145 .get_rxnfc = tg3_get_rxnfc,
14146 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14147 .get_rxfh = tg3_get_rxfh,
14148 .set_rxfh = tg3_set_rxfh,
14149 .get_channels = tg3_get_channels,
14150 .set_channels = tg3_set_channels,
14151 .get_ts_info = tg3_get_ts_info,
14152 .get_eee = tg3_get_eee,
14153 .set_eee = tg3_set_eee,
14154 .get_link_ksettings = tg3_get_link_ksettings,
14155 .set_link_ksettings = tg3_set_link_ksettings,
14156 };
14158 static void tg3_get_stats64(struct net_device *dev,
14159 struct rtnl_link_stats64 *stats)
14161 struct tg3 *tp = netdev_priv(dev);
14163 spin_lock_bh(&tp->lock);
14164 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14165 *stats = tp->net_stats_prev;
14166 spin_unlock_bh(&tp->lock);
14167 return;
14168 }
14170 tg3_get_nstats(tp, stats);
14171 spin_unlock_bh(&tp->lock);
14174 static void tg3_set_rx_mode(struct net_device *dev)
14176 struct tg3 *tp = netdev_priv(dev);
14178 if (!netif_running(dev))
14179 return;
14181 tg3_full_lock(tp, 0);
14182 __tg3_set_rx_mode(dev);
14183 tg3_full_unlock(tp);
14186 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14187 int new_mtu)
14188 {
14189 dev->mtu = new_mtu;
14191 if (new_mtu > ETH_DATA_LEN) {
14192 if (tg3_flag(tp, 5780_CLASS)) {
14193 netdev_update_features(dev);
14194 tg3_flag_clear(tp, TSO_CAPABLE);
14195 } else {
14196 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14197 }
14198 } else {
14199 if (tg3_flag(tp, 5780_CLASS)) {
14200 tg3_flag_set(tp, TSO_CAPABLE);
14201 netdev_update_features(dev);
14203 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14204 }
14205 }
14207 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14209 struct tg3 *tp = netdev_priv(dev);
14211 bool reset_phy = false;
14213 if (!netif_running(dev)) {
14214 /* We'll just catch it later when the
14215 * device is brought up.
14216 */
14217 tg3_set_mtu(dev, tp, new_mtu);
14218 return 0;
14219 }
14221 tg3_phy_stop(tp);
14223 tg3_netif_stop(tp);
14225 tg3_set_mtu(dev, tp, new_mtu);
14227 tg3_full_lock(tp, 1);
14229 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14231 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14232 * breaks all requests to 256 bytes.
14234 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14235 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14236 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14237 tg3_asic_rev(tp) == ASIC_REV_5720)
14238 reset_phy = true;
14240 err = tg3_restart_hw(tp, reset_phy);
14242 if (!err)
14243 tg3_netif_start(tp);
14245 tg3_full_unlock(tp);
14247 if (!err)
14248 tg3_phy_start(tp);
14250 return err;
14251 }
14253 static const struct net_device_ops tg3_netdev_ops = {
14254 .ndo_open = tg3_open,
14255 .ndo_stop = tg3_close,
14256 .ndo_start_xmit = tg3_start_xmit,
14257 .ndo_get_stats64 = tg3_get_stats64,
14258 .ndo_validate_addr = eth_validate_addr,
14259 .ndo_set_rx_mode = tg3_set_rx_mode,
14260 .ndo_set_mac_address = tg3_set_mac_addr,
14261 .ndo_eth_ioctl = tg3_ioctl,
14262 .ndo_tx_timeout = tg3_tx_timeout,
14263 .ndo_change_mtu = tg3_change_mtu,
14264 .ndo_fix_features = tg3_fix_features,
14265 .ndo_set_features = tg3_set_features,
14266 #ifdef CONFIG_NET_POLL_CONTROLLER
14267 .ndo_poll_controller = tg3_poll_controller,
14268 #endif
14269 };
14271 static void tg3_get_eeprom_size(struct tg3 *tp)
14273 u32 cursize, val, magic;
14275 tp->nvram_size = EEPROM_CHIP_SIZE;
14277 if (tg3_nvram_read(tp, 0, &magic) != 0)
14278 return;
14280 if ((magic != TG3_EEPROM_MAGIC) &&
14281 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14282 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14283 return;
14285 /*
14286 * Size the chip by reading offsets at increasing powers of two.
14287 * When we encounter our validation signature, we know the addressing
14288 * has wrapped around, and thus have our chip size.
14289 */
14291 cursize = 0x10;
14292 while (cursize < tp->nvram_size) {
14293 if (tg3_nvram_read(tp, cursize, &val) != 0)
14294 return;
14296 if (val == magic)
14297 break;
14299 cursize <<= 1;
14300 }
14302 tp->nvram_size = cursize;
14303 }
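/* Illustrative sketch (not part of the driver): the power-of-two probe used
 * by tg3_get_eeprom_size() above, run against a simulated part. Reads wrap
 * modulo the true size, so the first offset that reads back the signature
 * word stored at offset 0 is exactly the device size. The simulated size is
 * an assumption made up for the example.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

#define SIM_SIZE	0x4000		/* pretend 16 KiB part */
#define SIM_MAGIC	0x669955aa	/* signature word at offset 0 */

static uint32_t sim_read(uint32_t off)
{
	off &= SIM_SIZE - 1;		/* address lines wrap past the end */
	return off == 0 ? SIM_MAGIC : 0;
}

int main(void)
{
	uint32_t cursize = 0x10;

	while (cursize < 0x20000 && sim_read(cursize) != SIM_MAGIC)
		cursize <<= 1;
	printf("detected size: %#x\n", cursize);	/* prints 0x4000 */
	return 0;
}
#endif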
14305 static void tg3_get_nvram_size(struct tg3 *tp)
14309 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14310 return;
14312 /* Selfboot format */
14313 if (val != TG3_EEPROM_MAGIC) {
14314 tg3_get_eeprom_size(tp);
14315 return;
14316 }
14318 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14319 if (val != 0) {
14320 /* This is confusing. We want to operate on the
14321 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14322 * call will read from NVRAM and byteswap the data
14323 * according to the byteswapping settings for all
14324 * other register accesses. This ensures the data we
14325 * want will always reside in the lower 16-bits.
14326 * However, the data in NVRAM is in LE format, which
14327 * means the data from the NVRAM read will always be
14328 * opposite the endianness of the CPU. The 16-bit
14329 * byteswap then brings the data to CPU endianness.
14331 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14332 return;
14333 }
14334 }
14335 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14336 }
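/* Illustrative sketch (not part of the driver): the byte-order dance in
 * tg3_get_nvram_size() above. If the 16-bit size field is 512 (KiB), i.e.
 * 0x0200, the register byteswap hands it to the CPU byte-reversed as 0x0002;
 * a 16-bit swab then restores the value regardless of host endianness. The
 * helper name is made up for the example.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16_demo(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint32_t val = 0x00000002;	/* low 16 bits as read back: 0x0002 */
	uint32_t size = swab16_demo((uint16_t)(val & 0xffff)) * 1024;

	printf("nvram size = %u bytes (%u KiB)\n", size, size / 1024);
	/* prints: nvram size = 524288 bytes (512 KiB) */
	return 0;
}
#endif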
14338 static void tg3_get_nvram_info(struct tg3 *tp)
14342 nvcfg1 = tr32(NVRAM_CFG1);
14343 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14344 tg3_flag_set(tp, FLASH);
14346 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14347 tw32(NVRAM_CFG1, nvcfg1);
14350 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14351 tg3_flag(tp, 5780_CLASS)) {
14352 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14353 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14354 tp->nvram_jedecnum = JEDEC_ATMEL;
14355 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14356 tg3_flag_set(tp, NVRAM_BUFFERED);
14358 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14359 tp->nvram_jedecnum = JEDEC_ATMEL;
14360 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14362 case FLASH_VENDOR_ATMEL_EEPROM:
14363 tp->nvram_jedecnum = JEDEC_ATMEL;
14364 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14365 tg3_flag_set(tp, NVRAM_BUFFERED);
14367 case FLASH_VENDOR_ST:
14368 tp->nvram_jedecnum = JEDEC_ST;
14369 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14370 tg3_flag_set(tp, NVRAM_BUFFERED);
14372 case FLASH_VENDOR_SAIFUN:
14373 tp->nvram_jedecnum = JEDEC_SAIFUN;
14374 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14376 case FLASH_VENDOR_SST_SMALL:
14377 case FLASH_VENDOR_SST_LARGE:
14378 tp->nvram_jedecnum = JEDEC_SST;
14379 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14380 break;
14381 }
14382 } else {
14383 tp->nvram_jedecnum = JEDEC_ATMEL;
14384 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14385 tg3_flag_set(tp, NVRAM_BUFFERED);
14386 }
14387 }
14389 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14391 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14392 case FLASH_5752PAGE_SIZE_256:
14393 tp->nvram_pagesize = 256;
14395 case FLASH_5752PAGE_SIZE_512:
14396 tp->nvram_pagesize = 512;
14398 case FLASH_5752PAGE_SIZE_1K:
14399 tp->nvram_pagesize = 1024;
14401 case FLASH_5752PAGE_SIZE_2K:
14402 tp->nvram_pagesize = 2048;
14404 case FLASH_5752PAGE_SIZE_4K:
14405 tp->nvram_pagesize = 4096;
14407 case FLASH_5752PAGE_SIZE_264:
14408 tp->nvram_pagesize = 264;
14410 case FLASH_5752PAGE_SIZE_528:
14411 tp->nvram_pagesize = 528;
14416 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14420 nvcfg1 = tr32(NVRAM_CFG1);
14422 /* NVRAM protection for TPM */
14423 if (nvcfg1 & (1 << 27))
14424 tg3_flag_set(tp, PROTECTED_NVRAM);
14426 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14427 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14428 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14429 tp->nvram_jedecnum = JEDEC_ATMEL;
14430 tg3_flag_set(tp, NVRAM_BUFFERED);
14432 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14433 tp->nvram_jedecnum = JEDEC_ATMEL;
14434 tg3_flag_set(tp, NVRAM_BUFFERED);
14435 tg3_flag_set(tp, FLASH);
14437 case FLASH_5752VENDOR_ST_M45PE10:
14438 case FLASH_5752VENDOR_ST_M45PE20:
14439 case FLASH_5752VENDOR_ST_M45PE40:
14440 tp->nvram_jedecnum = JEDEC_ST;
14441 tg3_flag_set(tp, NVRAM_BUFFERED);
14442 tg3_flag_set(tp, FLASH);
14446 if (tg3_flag(tp, FLASH)) {
14447 tg3_nvram_get_pagesize(tp, nvcfg1);
14448 } else {
14449 /* For eeprom, set pagesize to maximum eeprom size */
14450 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14452 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14453 tw32(NVRAM_CFG1, nvcfg1);
14454 }
14455 }
14457 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14459 u32 nvcfg1, protect = 0;
14461 nvcfg1 = tr32(NVRAM_CFG1);
14463 /* NVRAM protection for TPM */
14464 if (nvcfg1 & (1 << 27)) {
14465 tg3_flag_set(tp, PROTECTED_NVRAM);
14466 protect = 1;
14467 }
14469 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14470 switch (nvcfg1) {
14471 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14472 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14473 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14474 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14475 tp->nvram_jedecnum = JEDEC_ATMEL;
14476 tg3_flag_set(tp, NVRAM_BUFFERED);
14477 tg3_flag_set(tp, FLASH);
14478 tp->nvram_pagesize = 264;
14479 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14480 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14481 tp->nvram_size = (protect ? 0x3e200 :
14482 TG3_NVRAM_SIZE_512KB);
14483 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14484 tp->nvram_size = (protect ? 0x1f200 :
14485 TG3_NVRAM_SIZE_256KB);
14487 tp->nvram_size = (protect ? 0x1f200 :
14488 TG3_NVRAM_SIZE_128KB);
14490 case FLASH_5752VENDOR_ST_M45PE10:
14491 case FLASH_5752VENDOR_ST_M45PE20:
14492 case FLASH_5752VENDOR_ST_M45PE40:
14493 tp->nvram_jedecnum = JEDEC_ST;
14494 tg3_flag_set(tp, NVRAM_BUFFERED);
14495 tg3_flag_set(tp, FLASH);
14496 tp->nvram_pagesize = 256;
14497 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14498 tp->nvram_size = (protect ?
14499 TG3_NVRAM_SIZE_64KB :
14500 TG3_NVRAM_SIZE_128KB);
14501 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14502 tp->nvram_size = (protect ?
14503 TG3_NVRAM_SIZE_64KB :
14504 TG3_NVRAM_SIZE_256KB);
14506 tp->nvram_size = (protect ?
14507 TG3_NVRAM_SIZE_128KB :
14508 TG3_NVRAM_SIZE_512KB);
14513 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14517 nvcfg1 = tr32(NVRAM_CFG1);
14519 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14520 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14521 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14522 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14523 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14524 tp->nvram_jedecnum = JEDEC_ATMEL;
14525 tg3_flag_set(tp, NVRAM_BUFFERED);
14526 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14528 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14529 tw32(NVRAM_CFG1, nvcfg1);
14531 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14532 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14533 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14534 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14535 tp->nvram_jedecnum = JEDEC_ATMEL;
14536 tg3_flag_set(tp, NVRAM_BUFFERED);
14537 tg3_flag_set(tp, FLASH);
14538 tp->nvram_pagesize = 264;
14540 case FLASH_5752VENDOR_ST_M45PE10:
14541 case FLASH_5752VENDOR_ST_M45PE20:
14542 case FLASH_5752VENDOR_ST_M45PE40:
14543 tp->nvram_jedecnum = JEDEC_ST;
14544 tg3_flag_set(tp, NVRAM_BUFFERED);
14545 tg3_flag_set(tp, FLASH);
14546 tp->nvram_pagesize = 256;
14551 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14553 u32 nvcfg1, protect = 0;
14555 nvcfg1 = tr32(NVRAM_CFG1);
14557 /* NVRAM protection for TPM */
14558 if (nvcfg1 & (1 << 27)) {
14559 tg3_flag_set(tp, PROTECTED_NVRAM);
14560 protect = 1;
14561 }
14563 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14564 switch (nvcfg1) {
14565 case FLASH_5761VENDOR_ATMEL_ADB021D:
14566 case FLASH_5761VENDOR_ATMEL_ADB041D:
14567 case FLASH_5761VENDOR_ATMEL_ADB081D:
14568 case FLASH_5761VENDOR_ATMEL_ADB161D:
14569 case FLASH_5761VENDOR_ATMEL_MDB021D:
14570 case FLASH_5761VENDOR_ATMEL_MDB041D:
14571 case FLASH_5761VENDOR_ATMEL_MDB081D:
14572 case FLASH_5761VENDOR_ATMEL_MDB161D:
14573 tp->nvram_jedecnum = JEDEC_ATMEL;
14574 tg3_flag_set(tp, NVRAM_BUFFERED);
14575 tg3_flag_set(tp, FLASH);
14576 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14577 tp->nvram_pagesize = 256;
14579 case FLASH_5761VENDOR_ST_A_M45PE20:
14580 case FLASH_5761VENDOR_ST_A_M45PE40:
14581 case FLASH_5761VENDOR_ST_A_M45PE80:
14582 case FLASH_5761VENDOR_ST_A_M45PE16:
14583 case FLASH_5761VENDOR_ST_M_M45PE20:
14584 case FLASH_5761VENDOR_ST_M_M45PE40:
14585 case FLASH_5761VENDOR_ST_M_M45PE80:
14586 case FLASH_5761VENDOR_ST_M_M45PE16:
14587 tp->nvram_jedecnum = JEDEC_ST;
14588 tg3_flag_set(tp, NVRAM_BUFFERED);
14589 tg3_flag_set(tp, FLASH);
14590 tp->nvram_pagesize = 256;
14591 break;
14592 }
14594 if (protect) {
14595 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14596 } else {
14597 switch (nvcfg1) {
14598 case FLASH_5761VENDOR_ATMEL_ADB161D:
14599 case FLASH_5761VENDOR_ATMEL_MDB161D:
14600 case FLASH_5761VENDOR_ST_A_M45PE16:
14601 case FLASH_5761VENDOR_ST_M_M45PE16:
14602 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14604 case FLASH_5761VENDOR_ATMEL_ADB081D:
14605 case FLASH_5761VENDOR_ATMEL_MDB081D:
14606 case FLASH_5761VENDOR_ST_A_M45PE80:
14607 case FLASH_5761VENDOR_ST_M_M45PE80:
14608 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14610 case FLASH_5761VENDOR_ATMEL_ADB041D:
14611 case FLASH_5761VENDOR_ATMEL_MDB041D:
14612 case FLASH_5761VENDOR_ST_A_M45PE40:
14613 case FLASH_5761VENDOR_ST_M_M45PE40:
14614 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14616 case FLASH_5761VENDOR_ATMEL_ADB021D:
14617 case FLASH_5761VENDOR_ATMEL_MDB021D:
14618 case FLASH_5761VENDOR_ST_A_M45PE20:
14619 case FLASH_5761VENDOR_ST_M_M45PE20:
14620 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14621 break;
14622 }
14623 }
14624 }
14626 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14628 tp->nvram_jedecnum = JEDEC_ATMEL;
14629 tg3_flag_set(tp, NVRAM_BUFFERED);
14630 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14633 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14637 nvcfg1 = tr32(NVRAM_CFG1);
14639 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14640 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14641 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14642 tp->nvram_jedecnum = JEDEC_ATMEL;
14643 tg3_flag_set(tp, NVRAM_BUFFERED);
14644 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14646 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14647 tw32(NVRAM_CFG1, nvcfg1);
14649 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14650 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14651 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14652 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14653 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14654 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14655 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14656 tp->nvram_jedecnum = JEDEC_ATMEL;
14657 tg3_flag_set(tp, NVRAM_BUFFERED);
14658 tg3_flag_set(tp, FLASH);
14660 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14661 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14662 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14663 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14664 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14666 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14667 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14668 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14670 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14671 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14672 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14673 break;
14674 }
14675 break;
14676 case FLASH_5752VENDOR_ST_M45PE10:
14677 case FLASH_5752VENDOR_ST_M45PE20:
14678 case FLASH_5752VENDOR_ST_M45PE40:
14679 tp->nvram_jedecnum = JEDEC_ST;
14680 tg3_flag_set(tp, NVRAM_BUFFERED);
14681 tg3_flag_set(tp, FLASH);
14683 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14684 case FLASH_5752VENDOR_ST_M45PE10:
14685 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14687 case FLASH_5752VENDOR_ST_M45PE20:
14688 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14690 case FLASH_5752VENDOR_ST_M45PE40:
14691 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14692 break;
14693 }
14694 break;
14695 default:
14696 tg3_flag_set(tp, NO_NVRAM);
14697 return;
14698 }
14700 tg3_nvram_get_pagesize(tp, nvcfg1);
14701 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14702 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14706 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14710 nvcfg1 = tr32(NVRAM_CFG1);
14712 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14713 case FLASH_5717VENDOR_ATMEL_EEPROM:
14714 case FLASH_5717VENDOR_MICRO_EEPROM:
14715 tp->nvram_jedecnum = JEDEC_ATMEL;
14716 tg3_flag_set(tp, NVRAM_BUFFERED);
14717 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14719 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14720 tw32(NVRAM_CFG1, nvcfg1);
14722 case FLASH_5717VENDOR_ATMEL_MDB011D:
14723 case FLASH_5717VENDOR_ATMEL_ADB011B:
14724 case FLASH_5717VENDOR_ATMEL_ADB011D:
14725 case FLASH_5717VENDOR_ATMEL_MDB021D:
14726 case FLASH_5717VENDOR_ATMEL_ADB021B:
14727 case FLASH_5717VENDOR_ATMEL_ADB021D:
14728 case FLASH_5717VENDOR_ATMEL_45USPT:
14729 tp->nvram_jedecnum = JEDEC_ATMEL;
14730 tg3_flag_set(tp, NVRAM_BUFFERED);
14731 tg3_flag_set(tp, FLASH);
14733 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14734 case FLASH_5717VENDOR_ATMEL_MDB021D:
14735 /* Detect size with tg3_nvram_get_size() */
14736 break;
14737 case FLASH_5717VENDOR_ATMEL_ADB021B:
14738 case FLASH_5717VENDOR_ATMEL_ADB021D:
14739 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14740 break;
14741 default:
14742 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14743 break;
14744 }
14745 break;
14746 case FLASH_5717VENDOR_ST_M_M25PE10:
14747 case FLASH_5717VENDOR_ST_A_M25PE10:
14748 case FLASH_5717VENDOR_ST_M_M45PE10:
14749 case FLASH_5717VENDOR_ST_A_M45PE10:
14750 case FLASH_5717VENDOR_ST_M_M25PE20:
14751 case FLASH_5717VENDOR_ST_A_M25PE20:
14752 case FLASH_5717VENDOR_ST_M_M45PE20:
14753 case FLASH_5717VENDOR_ST_A_M45PE20:
14754 case FLASH_5717VENDOR_ST_25USPT:
14755 case FLASH_5717VENDOR_ST_45USPT:
14756 tp->nvram_jedecnum = JEDEC_ST;
14757 tg3_flag_set(tp, NVRAM_BUFFERED);
14758 tg3_flag_set(tp, FLASH);
14760 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14761 case FLASH_5717VENDOR_ST_M_M25PE20:
14762 case FLASH_5717VENDOR_ST_M_M45PE20:
14763 /* Detect size with tg3_nvram_get_size() */
14764 break;
14765 case FLASH_5717VENDOR_ST_A_M25PE20:
14766 case FLASH_5717VENDOR_ST_A_M45PE20:
14767 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14768 break;
14769 default:
14770 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14771 break;
14772 }
14773 break;
14774 default:
14775 tg3_flag_set(tp, NO_NVRAM);
14776 return;
14777 }
14779 tg3_nvram_get_pagesize(tp, nvcfg1);
14780 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14781 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14784 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14786 u32 nvcfg1, nvmpinstrp, nv_status;
14788 nvcfg1 = tr32(NVRAM_CFG1);
14789 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14791 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14792 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14793 tg3_flag_set(tp, NO_NVRAM);
14794 return;
14795 }
14797 switch (nvmpinstrp) {
14798 case FLASH_5762_MX25L_100:
14799 case FLASH_5762_MX25L_200:
14800 case FLASH_5762_MX25L_400:
14801 case FLASH_5762_MX25L_800:
14802 case FLASH_5762_MX25L_160_320:
14803 tp->nvram_pagesize = 4096;
14804 tp->nvram_jedecnum = JEDEC_MACRONIX;
14805 tg3_flag_set(tp, NVRAM_BUFFERED);
14806 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14807 tg3_flag_set(tp, FLASH);
14808 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14809 tp->nvram_size =
14810 (1 << (nv_status >> AUTOSENSE_DEVID &
14811 AUTOSENSE_DEVID_MASK)
14812 << AUTOSENSE_SIZE_IN_MB);
14813 break;
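/* Reading of the size expression above: shifts bind tighter than '&', so
 * devid = (nv_status >> AUTOSENSE_DEVID) & AUTOSENSE_DEVID_MASK, and the
 * size becomes (1 << devid) << AUTOSENSE_SIZE_IN_MB, i.e. presumably
 * 2^devid scaled up to megabytes (a devid of 1 would mean a 2 MB part).
 */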
14815 case FLASH_5762_EEPROM_HD:
14816 nvmpinstrp = FLASH_5720_EEPROM_HD;
14817 break;
14818 case FLASH_5762_EEPROM_LD:
14819 nvmpinstrp = FLASH_5720_EEPROM_LD;
14820 break;
14821 case FLASH_5720VENDOR_M_ST_M45PE20:
14822 /* This pinstrap supports multiple sizes, so force it
14823 * to read the actual size from location 0xf0.
14825 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14826 break;
14827 }
14828 }
14830 switch (nvmpinstrp) {
14831 case FLASH_5720_EEPROM_HD:
14832 case FLASH_5720_EEPROM_LD:
14833 tp->nvram_jedecnum = JEDEC_ATMEL;
14834 tg3_flag_set(tp, NVRAM_BUFFERED);
14836 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14837 tw32(NVRAM_CFG1, nvcfg1);
14838 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14839 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14840 else
14841 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14842 break;
14843 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14844 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14845 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14846 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14847 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14848 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14849 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14850 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14851 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14852 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14853 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14854 case FLASH_5720VENDOR_ATMEL_45USPT:
14855 tp->nvram_jedecnum = JEDEC_ATMEL;
14856 tg3_flag_set(tp, NVRAM_BUFFERED);
14857 tg3_flag_set(tp, FLASH);
14859 switch (nvmpinstrp) {
14860 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14861 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14862 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14863 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14865 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14866 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14867 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14868 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14870 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14871 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14872 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14873 break;
14874 default:
14875 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14876 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14877 break;
14878 }
14879 break;
14880 case FLASH_5720VENDOR_M_ST_M25PE10:
14881 case FLASH_5720VENDOR_M_ST_M45PE10:
14882 case FLASH_5720VENDOR_A_ST_M25PE10:
14883 case FLASH_5720VENDOR_A_ST_M45PE10:
14884 case FLASH_5720VENDOR_M_ST_M25PE20:
14885 case FLASH_5720VENDOR_M_ST_M45PE20:
14886 case FLASH_5720VENDOR_A_ST_M25PE20:
14887 case FLASH_5720VENDOR_A_ST_M45PE20:
14888 case FLASH_5720VENDOR_M_ST_M25PE40:
14889 case FLASH_5720VENDOR_M_ST_M45PE40:
14890 case FLASH_5720VENDOR_A_ST_M25PE40:
14891 case FLASH_5720VENDOR_A_ST_M45PE40:
14892 case FLASH_5720VENDOR_M_ST_M25PE80:
14893 case FLASH_5720VENDOR_M_ST_M45PE80:
14894 case FLASH_5720VENDOR_A_ST_M25PE80:
14895 case FLASH_5720VENDOR_A_ST_M45PE80:
14896 case FLASH_5720VENDOR_ST_25USPT:
14897 case FLASH_5720VENDOR_ST_45USPT:
14898 tp->nvram_jedecnum = JEDEC_ST;
14899 tg3_flag_set(tp, NVRAM_BUFFERED);
14900 tg3_flag_set(tp, FLASH);
14902 switch (nvmpinstrp) {
14903 case FLASH_5720VENDOR_M_ST_M25PE20:
14904 case FLASH_5720VENDOR_M_ST_M45PE20:
14905 case FLASH_5720VENDOR_A_ST_M25PE20:
14906 case FLASH_5720VENDOR_A_ST_M45PE20:
14907 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14909 case FLASH_5720VENDOR_M_ST_M25PE40:
14910 case FLASH_5720VENDOR_M_ST_M45PE40:
14911 case FLASH_5720VENDOR_A_ST_M25PE40:
14912 case FLASH_5720VENDOR_A_ST_M45PE40:
14913 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14915 case FLASH_5720VENDOR_M_ST_M25PE80:
14916 case FLASH_5720VENDOR_M_ST_M45PE80:
14917 case FLASH_5720VENDOR_A_ST_M25PE80:
14918 case FLASH_5720VENDOR_A_ST_M45PE80:
14919 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14920 break;
14921 default:
14922 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14923 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14924 break;
14925 }
14926 break;
14927 default:
14928 tg3_flag_set(tp, NO_NVRAM);
14929 return;
14930 }
14932 tg3_nvram_get_pagesize(tp, nvcfg1);
14933 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14934 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14936 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14939 if (tg3_nvram_read(tp, 0, &val))
14940 return;
14942 if (val != TG3_EEPROM_MAGIC &&
14943 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14944 tg3_flag_set(tp, NO_NVRAM);
14948 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14949 static void tg3_nvram_init(struct tg3 *tp)
14951 if (tg3_flag(tp, IS_SSB_CORE)) {
14952 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14953 tg3_flag_clear(tp, NVRAM);
14954 tg3_flag_clear(tp, NVRAM_BUFFERED);
14955 tg3_flag_set(tp, NO_NVRAM);
14959 tw32_f(GRC_EEPROM_ADDR,
14960 (EEPROM_ADDR_FSM_RESET |
14961 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14962 EEPROM_ADDR_CLKPERD_SHIFT)));
14964 msleep(1);
14966 /* Enable seeprom accesses. */
14967 tw32_f(GRC_LOCAL_CTRL,
14968 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14969 udelay(100);
14971 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14972 tg3_asic_rev(tp) != ASIC_REV_5701) {
14973 tg3_flag_set(tp, NVRAM);
14975 if (tg3_nvram_lock(tp)) {
14976 netdev_warn(tp->dev,
14977 "Cannot get nvram lock, %s failed\n",
14981 tg3_enable_nvram_access(tp);
14983 tp->nvram_size = 0;
14985 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14986 tg3_get_5752_nvram_info(tp);
14987 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14988 tg3_get_5755_nvram_info(tp);
14989 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14990 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14991 tg3_asic_rev(tp) == ASIC_REV_5785)
14992 tg3_get_5787_nvram_info(tp);
14993 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14994 tg3_get_5761_nvram_info(tp);
14995 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14996 tg3_get_5906_nvram_info(tp);
14997 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14998 tg3_flag(tp, 57765_CLASS))
14999 tg3_get_57780_nvram_info(tp);
15000 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15001 tg3_asic_rev(tp) == ASIC_REV_5719)
15002 tg3_get_5717_nvram_info(tp);
15003 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15004 tg3_asic_rev(tp) == ASIC_REV_5762)
15005 tg3_get_5720_nvram_info(tp);
15006 else
15007 tg3_get_nvram_info(tp);
15009 if (tp->nvram_size == 0)
15010 tg3_get_nvram_size(tp);
15012 tg3_disable_nvram_access(tp);
15013 tg3_nvram_unlock(tp);
15015 } else {
15016 tg3_flag_clear(tp, NVRAM);
15017 tg3_flag_clear(tp, NVRAM_BUFFERED);
15019 tg3_get_eeprom_size(tp);
15020 }
15021 }
15023 struct subsys_tbl_ent {
15024 u16 subsys_vendor, subsys_devid;
15028 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15029 /* Broadcom boards. */
15030 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15031 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15032 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15033 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15034 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15035 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15036 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15037 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15038 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15039 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15040 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15041 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15042 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15043 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15044 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15045 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15046 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15047 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15048 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15049 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15050 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15051 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15054 { TG3PCI_SUBVENDOR_ID_3COM,
15055 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15056 { TG3PCI_SUBVENDOR_ID_3COM,
15057 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15058 { TG3PCI_SUBVENDOR_ID_3COM,
15059 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15060 { TG3PCI_SUBVENDOR_ID_3COM,
15061 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15062 { TG3PCI_SUBVENDOR_ID_3COM,
15063 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15066 { TG3PCI_SUBVENDOR_ID_DELL,
15067 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15068 { TG3PCI_SUBVENDOR_ID_DELL,
15069 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15070 { TG3PCI_SUBVENDOR_ID_DELL,
15071 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15072 { TG3PCI_SUBVENDOR_ID_DELL,
15073 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15075 /* Compaq boards. */
15076 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15077 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15078 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15079 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15080 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15081 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15082 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15083 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15084 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15085 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15088 { TG3PCI_SUBVENDOR_ID_IBM,
15089 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15092 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15096 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15097 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15098 tp->pdev->subsystem_vendor) &&
15099 (subsys_id_to_phy_id[i].subsys_devid ==
15100 tp->pdev->subsystem_device))
15101 return &subsys_id_to_phy_id[i];
15102 }
15104 return NULL;
15105 }
15106 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15110 tp->phy_id = TG3_PHY_ID_INVALID;
15111 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15113 /* Assume an onboard device and WOL capable by default. */
15114 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15115 tg3_flag_set(tp, WOL_CAP);
15117 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15118 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15119 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15120 tg3_flag_set(tp, IS_NIC);
15121 }
15122 val = tr32(VCPU_CFGSHDW);
15123 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15124 tg3_flag_set(tp, ASPM_WORKAROUND);
15125 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15126 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15127 tg3_flag_set(tp, WOL_ENABLE);
15128 device_set_wakeup_enable(&tp->pdev->dev, true);
15129 }
15130 goto done;
15131 }
15133 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15134 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15135 u32 nic_cfg, led_cfg;
15136 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15137 u32 nic_phy_id, ver, eeprom_phy_id;
15138 int eeprom_phy_serdes = 0;
15140 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15141 tp->nic_sram_data_cfg = nic_cfg;
15143 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15144 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15145 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15146 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15147 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15148 (ver > 0) && (ver < 0x100))
15149 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15151 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15152 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15154 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15155 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15156 tg3_asic_rev(tp) == ASIC_REV_5720)
15157 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15159 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15160 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15161 eeprom_phy_serdes = 1;
15163 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15164 if (nic_phy_id != 0) {
15165 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15166 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15168 eeprom_phy_id = (id1 >> 16) << 10;
15169 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15170 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15171 } else
15172 eeprom_phy_id = 0;
15174 tp->phy_id = eeprom_phy_id;
15175 if (eeprom_phy_serdes) {
15176 if (!tg3_flag(tp, 5705_PLUS))
15177 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15178 else
15179 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15180 }
15182 if (tg3_flag(tp, 5750_PLUS))
15183 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15184 SHASTA_EXT_LED_MODE_MASK);
15185 else
15186 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15188 switch (led_cfg) {
15189 default:
15190 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15191 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15194 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15195 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15198 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15199 tp->led_ctrl = LED_CTRL_MODE_MAC;
15201 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15202 * read on some older 5700/5701 bootcode.
15204 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15205 tg3_asic_rev(tp) == ASIC_REV_5701)
15206 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15210 case SHASTA_EXT_LED_SHARED:
15211 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15212 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15213 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15214 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15215 LED_CTRL_MODE_PHY_2);
15217 if (tg3_flag(tp, 5717_PLUS) ||
15218 tg3_asic_rev(tp) == ASIC_REV_5762)
15219 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15220 LED_CTRL_BLINK_RATE_MASK;
15224 case SHASTA_EXT_LED_MAC:
15225 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15228 case SHASTA_EXT_LED_COMBO:
15229 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15230 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15231 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15232 LED_CTRL_MODE_PHY_2);
15233 break;
15234 }
15237 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15238 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15239 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15240 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15242 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15243 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15245 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15246 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15247 if ((tp->pdev->subsystem_vendor ==
15248 PCI_VENDOR_ID_ARIMA) &&
15249 (tp->pdev->subsystem_device == 0x205a ||
15250 tp->pdev->subsystem_device == 0x2063))
15251 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15252 } else {
15253 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15254 tg3_flag_set(tp, IS_NIC);
15255 }
15257 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15258 tg3_flag_set(tp, ENABLE_ASF);
15259 if (tg3_flag(tp, 5750_PLUS))
15260 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15261 }
15263 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15264 tg3_flag(tp, 5750_PLUS))
15265 tg3_flag_set(tp, ENABLE_APE);
15267 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15268 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15269 tg3_flag_clear(tp, WOL_CAP);
15271 if (tg3_flag(tp, WOL_CAP) &&
15272 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15273 tg3_flag_set(tp, WOL_ENABLE);
15274 device_set_wakeup_enable(&tp->pdev->dev, true);
15275 }
15277 if (cfg2 & (1 << 17))
15278 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15280 /* Serdes signal pre-emphasis in register 0x590 is set by
15281 * the bootcode if bit 18 is set. */
15282 if (cfg2 & (1 << 18))
15283 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15285 if ((tg3_flag(tp, 57765_PLUS) ||
15286 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15287 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15288 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15289 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15291 if (tg3_flag(tp, PCI_EXPRESS)) {
15294 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15295 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15296 !tg3_flag(tp, 57765_PLUS) &&
15297 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15298 tg3_flag_set(tp, ASPM_WORKAROUND);
15299 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15300 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15301 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15302 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15303 }
15305 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15306 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15307 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15308 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15309 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15310 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15312 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15313 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15314 }
15315 done:
15316 if (tg3_flag(tp, WOL_CAP))
15317 device_set_wakeup_enable(&tp->pdev->dev,
15318 tg3_flag(tp, WOL_ENABLE));
15319 else
15320 device_set_wakeup_capable(&tp->pdev->dev, false);
15321 }
15323 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15326 u32 val2, off = offset * 8;
15328 err = tg3_nvram_lock(tp);
15329 if (err)
15330 return err;
15332 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15333 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15334 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15335 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15336 udelay(100);
15338 for (i = 0; i < 100; i++) {
15339 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15340 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15341 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15342 break;
15343 }
15344 udelay(100);
15345 }
15347 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15349 tg3_nvram_unlock(tp);
15350 if (val2 & APE_OTP_STATUS_CMD_DONE)
15351 return 0;
15353 return -EBUSY;
15354 }
15356 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15361 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15362 tw32(OTP_CTRL, cmd);
15364 /* Wait for up to 1 ms for command to execute. */
15365 for (i = 0; i < 100; i++) {
15366 val = tr32(OTP_STATUS);
15367 if (val & OTP_STATUS_CMD_DONE)
15368 break;
15369 udelay(10);
15370 }
15372 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15373 }
15375 /* Read the gphy configuration from the OTP region of the chip. The gphy
15376 * configuration is a 32-bit value that straddles the alignment boundary.
15377 * We do two 32-bit reads and then shift and merge the results.
15379 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15381 u32 bhalf_otp, thalf_otp;
15383 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15385 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15386 return 0;
15388 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15390 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15391 return 0;
15393 thalf_otp = tr32(OTP_READ_DATA);
15395 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15397 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15398 return 0;
15400 bhalf_otp = tr32(OTP_READ_DATA);
15402 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15403 }
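/* Illustrative sketch (not part of the driver): how the shift-and-merge in
 * tg3_read_otp_phycfg() above reassembles a word that straddles two aligned
 * OTP reads. The values are made up for the example.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t thalf = 0x1234aabb;	/* wanted high half in low 16 bits */
	uint32_t bhalf = 0xccdd5678;	/* wanted low half in high 16 bits */
	uint32_t cfg = ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);

	printf("phycfg = %#010x\n", cfg);	/* prints 0xaabbccdd */
	return 0;
}
#endif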
15405 static void tg3_phy_init_link_config(struct tg3 *tp)
15407 u32 adv = ADVERTISED_Autoneg;
15409 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15410 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15411 adv |= ADVERTISED_1000baseT_Half;
15412 adv |= ADVERTISED_1000baseT_Full;
15415 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15416 adv |= ADVERTISED_100baseT_Half |
15417 ADVERTISED_100baseT_Full |
15418 ADVERTISED_10baseT_Half |
15419 ADVERTISED_10baseT_Full |
15420 ADVERTISED_TP;
15421 else
15422 adv |= ADVERTISED_FIBRE;
15424 tp->link_config.advertising = adv;
15425 tp->link_config.speed = SPEED_UNKNOWN;
15426 tp->link_config.duplex = DUPLEX_UNKNOWN;
15427 tp->link_config.autoneg = AUTONEG_ENABLE;
15428 tp->link_config.active_speed = SPEED_UNKNOWN;
15429 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15431 tp->old_link = -1;
15432 }
15434 static int tg3_phy_probe(struct tg3 *tp)
15436 u32 hw_phy_id_1, hw_phy_id_2;
15437 u32 hw_phy_id, hw_phy_id_masked;
15440 /* flow control autonegotiation is default behavior */
15441 tg3_flag_set(tp, PAUSE_AUTONEG);
15442 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15444 if (tg3_flag(tp, ENABLE_APE)) {
15445 switch (tp->pci_fn) {
15446 case 0:
15447 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15448 break;
15449 case 1:
15450 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15451 break;
15452 case 2:
15453 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15454 break;
15455 case 3:
15456 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15457 break;
15458 }
15459 }
15461 if (!tg3_flag(tp, ENABLE_ASF) &&
15462 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15463 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15464 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15465 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15467 if (tg3_flag(tp, USE_PHYLIB))
15468 return tg3_phy_init(tp);
15470 /* Reading the PHY ID register can conflict with ASF
15471 * firmware access to the PHY hardware.
15474 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15475 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15476 } else {
15477 /* Now read the physical PHY_ID from the chip and verify
15478 * that it is sane. If it doesn't look good, we fall back
15479 * to either the hard-coded table based PHY_ID and failing
15480 * that the value found in the eeprom area.
15482 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15483 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15485 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15486 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15487 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15489 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15490 }
15492 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15493 tp->phy_id = hw_phy_id;
15494 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15495 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15496 else
15497 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15498 } else {
15499 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15500 /* Do nothing, phy ID already set up in
15501 * tg3_get_eeprom_hw_cfg().
15504 struct subsys_tbl_ent *p;
15506 /* No eeprom signature? Try the hardcoded
15507 * subsys device table.
15509 p = tg3_lookup_by_subsys(tp);
15510 if (p)
15511 tp->phy_id = p->phy_id;
15512 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15513 /* For now we saw the IDs 0xbc050cd0,
15514 * 0xbc050f80 and 0xbc050c30 on devices
15515 * connected to a BCM4785 and there are
15516 * probably more. Just assume that the phy is
15517 * supported when it is connected to an SSB core
15518 * for now.
15519 */
15520 return -ENODEV;
15521 }
15523 if (!tp->phy_id ||
15524 tp->phy_id == TG3_PHY_ID_BCM8002)
15525 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15529 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15530 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15531 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15532 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15533 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15534 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15535 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15536 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15537 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15538 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15540 tp->eee.supported = SUPPORTED_100baseT_Full |
15541 SUPPORTED_1000baseT_Full;
15542 tp->eee.advertised = ADVERTISED_100baseT_Full |
15543 ADVERTISED_1000baseT_Full;
15544 tp->eee.eee_enabled = 1;
15545 tp->eee.tx_lpi_enabled = 1;
15546 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15547 }
15549 tg3_phy_init_link_config(tp);
15551 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15552 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15553 !tg3_flag(tp, ENABLE_APE) &&
15554 !tg3_flag(tp, ENABLE_ASF)) {
15557 tg3_readphy(tp, MII_BMSR, &bmsr);
15558 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15559 (bmsr & BMSR_LSTATUS))
15560 goto skip_phy_reset;
15562 err = tg3_phy_reset(tp);
15563 if (err)
15564 return err;
15566 tg3_phy_set_wirespeed(tp);
15568 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15569 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15570 tp->link_config.flowctrl);
15572 tg3_writephy(tp, MII_BMCR,
15573 BMCR_ANENABLE | BMCR_ANRESTART);
15574 }
15575 }
15577 skip_phy_reset:
15578 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15579 err = tg3_init_5401phy_dsp(tp);
15580 if (err)
15581 return err;
15583 err = tg3_init_5401phy_dsp(tp);
15584 }
15586 return err;
15587 }
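/* Illustrative sketch (not part of the driver): the MII_PHYSID1/MII_PHYSID2
 * packing used by tg3_phy_probe() above. The register values are made up;
 * the low nibble (the silicon revision) is what a 0xfffffff0-style ID mask
 * strips before the table compare.
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t id1 = 0x0020, id2 = 0x6212;	/* fictitious PHYSID1/2 */
	uint32_t phy_id;

	phy_id  = (id1 & 0xffff) << 10;		/* 0x00008000 */
	phy_id |= (id2 & 0xfc00) << 16;		/* 0x60000000 */
	phy_id |= (id2 & 0x03ff) << 0;		/* 0x00000212 */
	printf("phy_id = %#010x, masked = %#010x\n",
	       phy_id, phy_id & 0xfffffff0);	/* 0x60008212, 0x60008210 */
	return 0;
}
#endif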
15589 static void tg3_read_vpd(struct tg3 *tp)
15592 unsigned int len, vpdlen;
15595 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15596 if (!vpd_data)
15597 goto out_no_vpd;
15599 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15600 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15601 if (i < 0)
15602 goto partno;
15604 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15605 goto partno;
15607 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15608 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15609 if (i < 0)
15610 goto partno;
15612 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15613 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15615 partno:
15616 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15617 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15619 goto out_not_found;
15621 if (len > TG3_BPN_SIZE)
15622 goto out_not_found;
15624 memcpy(tp->board_part_number, &vpd_data[i], len);
15626 out_not_found:
15627 kfree(vpd_data);
15628 if (tp->board_part_number[0])
15629 return;
15631 out_no_vpd:
15632 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15633 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15634 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15635 strcpy(tp->board_part_number, "BCM5717");
15636 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15637 strcpy(tp->board_part_number, "BCM5718");
15640 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15641 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15642 strcpy(tp->board_part_number, "BCM57780");
15643 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15644 strcpy(tp->board_part_number, "BCM57760");
15645 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15646 strcpy(tp->board_part_number, "BCM57790");
15647 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15648 strcpy(tp->board_part_number, "BCM57788");
15651 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15652 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15653 strcpy(tp->board_part_number, "BCM57761");
15654 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15655 strcpy(tp->board_part_number, "BCM57765");
15656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15657 strcpy(tp->board_part_number, "BCM57781");
15658 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15659 strcpy(tp->board_part_number, "BCM57785");
15660 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15661 strcpy(tp->board_part_number, "BCM57791");
15662 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15663 strcpy(tp->board_part_number, "BCM57795");
15666 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15667 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15668 strcpy(tp->board_part_number, "BCM57762");
15669 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15670 strcpy(tp->board_part_number, "BCM57766");
15671 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15672 strcpy(tp->board_part_number, "BCM57782");
15673 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15674 strcpy(tp->board_part_number, "BCM57786");
15677 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15678 strcpy(tp->board_part_number, "BCM95906");
15681 strcpy(tp->board_part_number, "none");
15685 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15689 if (tg3_nvram_read(tp, offset, &val) ||
15690 (val & 0xfc000000) != 0x0c000000 ||
15691 tg3_nvram_read(tp, offset + 4, &val) ||
15692 (val & 0xe0000000) != 0)
15693 return 0;
15695 return 1;
15696 }
15698 static void tg3_read_bc_ver(struct tg3 *tp)
15700 u32 val, offset, start, ver_offset;
15702 bool newver = false;
15704 if (tg3_nvram_read(tp, 0xc, &offset) ||
15705 tg3_nvram_read(tp, 0x4, &start))
15706 return;
15708 offset = tg3_nvram_logical_addr(tp, offset);
15710 if (tg3_nvram_read(tp, offset, &val))
15711 return;
15713 if ((val & 0xfc000000) == 0x0c000000) {
15714 if (tg3_nvram_read(tp, offset + 4, &val))
15715 return;
15717 if (val == 0)
15718 newver = true;
15719 }
15721 dst_off = strlen(tp->fw_ver);
15723 if (newver) {
15724 if (TG3_VER_SIZE - dst_off < 16 ||
15725 tg3_nvram_read(tp, offset + 8, &ver_offset))
15726 return;
15728 offset = offset + ver_offset - start;
15729 for (i = 0; i < 16; i += 4) {
15730 __be32 v;
15731 if (tg3_nvram_read_be32(tp, offset + i, &v))
15732 return;
15734 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15735 }
15736 } else {
15737 u32 major, minor;
15739 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15740 return;
15742 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15743 TG3_NVM_BCVER_MAJSFT;
15744 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15745 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15746 "v%d.%02d", major, minor);
15750 static void tg3_read_hwsb_ver(struct tg3 *tp)
15752 u32 val, major, minor;
15754 /* Use native endian representation */
15755 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15756 return;
15758 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15759 TG3_NVM_HWSB_CFG1_MAJSFT;
15760 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15761 TG3_NVM_HWSB_CFG1_MINSFT;
15763 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15766 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15768 u32 offset, major, minor, build;
15770 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15772 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15773 return;
15775 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15776 case TG3_EEPROM_SB_REVISION_0:
15777 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15779 case TG3_EEPROM_SB_REVISION_2:
15780 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15782 case TG3_EEPROM_SB_REVISION_3:
15783 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15785 case TG3_EEPROM_SB_REVISION_4:
15786 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15788 case TG3_EEPROM_SB_REVISION_5:
15789 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15791 case TG3_EEPROM_SB_REVISION_6:
15792 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15793 break;
15794 default:
15795 return;
15796 }
15798 if (tg3_nvram_read(tp, offset, &val))
15799 return;
15801 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15802 TG3_EEPROM_SB_EDH_BLD_SHFT;
15803 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15804 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15805 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15807 if (minor > 99 || build > 26)
15808 return;
15810 offset = strlen(tp->fw_ver);
15811 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15812 " v%d.%02d", major, minor);
15814 if (build > 0) {
15815 offset = strlen(tp->fw_ver);
15816 if (offset < TG3_VER_SIZE - 1)
15817 tp->fw_ver[offset] = 'a' + build - 1;
15818 }
15819 }
15821 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15823 u32 val, offset, start;
15826 for (offset = TG3_NVM_DIR_START;
15827 offset < TG3_NVM_DIR_END;
15828 offset += TG3_NVM_DIRENT_SIZE) {
15829 if (tg3_nvram_read(tp, offset, &val))
15830 return;
15832 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15833 break;
15834 }
15836 if (offset == TG3_NVM_DIR_END)
15837 return;
15839 if (!tg3_flag(tp, 5705_PLUS))
15840 start = 0x08000000;
15841 else if (tg3_nvram_read(tp, offset - 4, &start))
15842 return;
15844 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15845 !tg3_fw_img_is_valid(tp, offset) ||
15846 tg3_nvram_read(tp, offset + 8, &val))
15847 return;
15849 offset += val - start;
15851 vlen = strlen(tp->fw_ver);
15853 tp->fw_ver[vlen++] = ',';
15854 tp->fw_ver[vlen++] = ' ';
15856 for (i = 0; i < 4; i++) {
15857 __be32 v;
15858 if (tg3_nvram_read_be32(tp, offset, &v))
15859 return;
15861 offset += sizeof(v);
15863 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15864 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15865 break;
15866 }
15868 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15869 vlen += sizeof(v);
15870 }
15871 }
15873 static void tg3_probe_ncsi(struct tg3 *tp)
15877 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15878 if (apedata != APE_SEG_SIG_MAGIC)
15879 return;
15881 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15882 if (!(apedata & APE_FW_STATUS_READY))
15883 return;
15885 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15886 tg3_flag_set(tp, APE_HAS_NCSI);
15889 static void tg3_read_dash_ver(struct tg3 *tp)
15895 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15897 if (tg3_flag(tp, APE_HAS_NCSI))
15898 fwtype = "NCSI";
15899 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15900 fwtype = "SMASH";
15901 else
15902 fwtype = "DASH";
15904 vlen = strlen(tp->fw_ver);
15906 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15907 fwtype,
15908 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15909 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15910 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15911 (apedata & APE_FW_VERSION_BLDMSK));
15914 static void tg3_read_otp_ver(struct tg3 *tp)
15918 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15919 return;
15921 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15922 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15923 TG3_OTP_MAGIC0_VALID(val)) {
15924 u64 val64 = (u64) val << 32 | val2;
15928 for (i = 0; i < 7; i++) {
15929 if ((val64 & 0xff) == 0)
15930 break;
15931 ver = val64 & 0xff;
15932 val64 >>= 8;
15933 }
15934 vlen = strlen(tp->fw_ver);
15935 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15939 static void tg3_read_fw_ver(struct tg3 *tp)
15942 bool vpd_vers = false;
15944 if (tp->fw_ver[0] != 0)
15945 vpd_vers = true;
15947 if (tg3_flag(tp, NO_NVRAM)) {
15948 strcat(tp->fw_ver, "sb");
15949 tg3_read_otp_ver(tp);
15950 return;
15951 }
15953 if (tg3_nvram_read(tp, 0, &val))
15954 return;
15956 if (val == TG3_EEPROM_MAGIC)
15957 tg3_read_bc_ver(tp);
15958 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15959 tg3_read_sb_ver(tp, val);
15960 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15961 tg3_read_hwsb_ver(tp);
15963 if (tg3_flag(tp, ENABLE_ASF)) {
15964 if (tg3_flag(tp, ENABLE_APE)) {
15965 tg3_probe_ncsi(tp);
15967 tg3_read_dash_ver(tp);
15968 } else if (!vpd_vers) {
15969 tg3_read_mgmtfw_ver(tp);
15970 }
15971 }
15973 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15976 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15978 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15979 return TG3_RX_RET_MAX_SIZE_5717;
15980 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15981 return TG3_RX_RET_MAX_SIZE_5700;
15982 else
15983 return TG3_RX_RET_MAX_SIZE_5705;
15984 }
15986 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15987 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15988 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15989 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15990 { },
15991 };
15993 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15995 struct pci_dev *peer;
15996 unsigned int func, devnr = tp->pdev->devfn & ~7;
15998 for (func = 0; func < 8; func++) {
15999 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16000 if (peer && peer != tp->pdev)
16001 break;
16002 }
16004 /* 5704 can be configured in single-port mode, set peer to
16005 * tp->pdev in that case.
16006 */
16007 if (!peer) {
16008 peer = tp->pdev;
16009 return peer;
16010 }
16012 /*
16013 * We don't need to keep the refcount elevated; there's no way
16014 * to remove one half of this device without removing the other
16015 */
16016 pci_dev_put(peer);
16018 return peer;
16019 }
16021 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16023 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16024 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16027 /* All devices that use the alternate
16028 * ASIC REV location have a CPMU.
16030 tg3_flag_set(tp, CPMU_PRESENT);
16032 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16038 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16039 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16043 reg = TG3PCI_GEN2_PRODID_ASICREV;
16044 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16046 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16049 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16050 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16051 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16052 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16053 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16054 reg = TG3PCI_GEN15_PRODID_ASICREV;
16056 reg = TG3PCI_PRODID_ASICREV;
16058 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16059 }
16061 /* Wrong chip ID in 5752 A0. This code can be removed later
16062 * as A0 is not in production.
16064 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16065 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16067 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16068 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16070 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16071 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16072 tg3_asic_rev(tp) == ASIC_REV_5720)
16073 tg3_flag_set(tp, 5717_PLUS);
16075 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16076 tg3_asic_rev(tp) == ASIC_REV_57766)
16077 tg3_flag_set(tp, 57765_CLASS);
16079 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16080 tg3_asic_rev(tp) == ASIC_REV_5762)
16081 tg3_flag_set(tp, 57765_PLUS);
16083 /* Intentionally exclude ASIC_REV_5906 */
16084 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16085 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16086 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16087 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16088 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16089 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16090 tg3_flag(tp, 57765_PLUS))
16091 tg3_flag_set(tp, 5755_PLUS);
16093 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16094 tg3_asic_rev(tp) == ASIC_REV_5714)
16095 tg3_flag_set(tp, 5780_CLASS);
16097 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16098 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16099 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16100 tg3_flag(tp, 5755_PLUS) ||
16101 tg3_flag(tp, 5780_CLASS))
16102 tg3_flag_set(tp, 5750_PLUS);
16104 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16105 tg3_flag(tp, 5750_PLUS))
16106 tg3_flag_set(tp, 5705_PLUS);
16109 static bool tg3_10_100_only_device(struct tg3 *tp,
16110 const struct pci_device_id *ent)
16112 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16114 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16115 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16116 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16117 return true;
16119 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16120 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16121 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16122 return true;
16123 } else {
16124 return true;
16125 }
16126 }
16128 return false;
16129 }
16131 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16134 u32 pci_state_reg, grc_misc_cfg;
16139 /* Force memory write invalidate off. If we leave it on,
16140 * then on 5700_BX chips we have to enable a workaround.
16141 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16142 * to match the cacheline size. The Broadcom driver has this
16143 * workaround but turns MWI off all the time, and so never uses
16144 * it. This seems to suggest that the workaround is insufficient.
16145 */
16146 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16147 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16148 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16150 /* Important! -- Make sure register accesses are byteswapped
16151 * correctly. Also, for those chips that require it, make
16152 * sure that indirect register accesses are enabled before
16153 * the first operation.
16154 */
16155 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16156 &misc_ctrl_reg);
16157 tp->misc_host_ctrl |= (misc_ctrl_reg &
16158 MISC_HOST_CTRL_CHIPREV);
16159 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16160 tp->misc_host_ctrl);
16162 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16164 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16165 * we need to disable memory and use config. cycles
16166 * only to access all registers. The 5702/03 chips
16167 * can mistakenly decode the special cycles from the
16168 * ICH chipsets as memory write cycles, causing corruption
16169 * of register and memory space. Only certain ICH bridges
16170 * will drive special cycles with non-zero data during the
16171 * address phase which can fall within the 5703's address
16172 * range. This is not an ICH bug as the PCI spec allows
16173 * non-zero address during special cycles. However, only
16174 * these ICH bridges are known to drive non-zero addresses
16175 * during special cycles.
16177 * Since special cycles do not cross PCI bridges, we only
16178 * enable this workaround if the 5703 is on the secondary
16179 * bus of these ICH bridges.
16180 */
16181 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16182 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16183 static struct tg3_dev_id {
16184 u32 vendor;
16185 u32 device;
16186 u32 rev;
16187 } ich_chipsets[] = {
16188 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16189 PCI_ANY_ID },
16190 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16191 PCI_ANY_ID },
16192 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16193 0xa },
16194 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16195 PCI_ANY_ID },
16196 { },
16197 };
16198 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16199 struct pci_dev *bridge = NULL;
16201 while (pci_id->vendor != 0) {
16202 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16203 bridge);
16204 if (!bridge) {
16205 pci_id++;
16206 continue;
16207 }
16208 if (pci_id->rev != PCI_ANY_ID) {
16209 if (bridge->revision > pci_id->rev)
16210 continue;
16211 }
16212 if (bridge->subordinate &&
16213 (bridge->subordinate->number ==
16214 tp->pdev->bus->number)) {
16215 tg3_flag_set(tp, ICH_WORKAROUND);
16216 pci_dev_put(bridge);
16217 break;
16218 }
16219 }
16220 }
16222 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16223 static struct tg3_dev_id {
16224 u32 vendor;
16225 u32 device;
16226 } bridge_chipsets[] = {
16227 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16228 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16229 { },
16230 };
16231 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16232 struct pci_dev *bridge = NULL;
16234 while (pci_id->vendor != 0) {
16235 bridge = pci_get_device(pci_id->vendor,
16236 pci_id->device,
16237 bridge);
16238 if (!bridge) {
16239 pci_id++;
16240 continue;
16241 }
16242 if (bridge->subordinate &&
16243 (bridge->subordinate->number <=
16244 tp->pdev->bus->number) &&
16245 (bridge->subordinate->busn_res.end >=
16246 tp->pdev->bus->number)) {
16247 tg3_flag_set(tp, 5701_DMA_BUG);
16248 pci_dev_put(bridge);
16249 break;
16250 }
16251 }
16252 }
16254 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16255 * DMA addresses > 40-bit. This bridge may have other additional
16256 * 57xx devices behind it in some 4-port NIC designs for example.
16257 * Any tg3 device found behind the bridge will also need the 40-bit
16258 * DMA workaround.
16259 */
16260 if (tg3_flag(tp, 5780_CLASS)) {
16261 tg3_flag_set(tp, 40BIT_DMA_BUG);
16262 tp->msi_cap = tp->pdev->msi_cap;
16263 } else {
16264 struct pci_dev *bridge = NULL;
16266 do {
16267 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16268 PCI_DEVICE_ID_SERVERWORKS_EPB,
16270 if (bridge && bridge->subordinate &&
16271 (bridge->subordinate->number <=
16272 tp->pdev->bus->number) &&
16273 (bridge->subordinate->busn_res.end >=
16274 tp->pdev->bus->number)) {
16275 tg3_flag_set(tp, 40BIT_DMA_BUG);
16276 pci_dev_put(bridge);
16277 break;
16278 }
16279 } while (bridge);
16280 }
16282 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16283 tg3_asic_rev(tp) == ASIC_REV_5714)
16284 tp->pdev_peer = tg3_find_peer(tp);
16286 /* Determine TSO capabilities */
16287 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16288 ; /* Do nothing. HW bug. */
16289 else if (tg3_flag(tp, 57765_PLUS))
16290 tg3_flag_set(tp, HW_TSO_3);
16291 else if (tg3_flag(tp, 5755_PLUS) ||
16292 tg3_asic_rev(tp) == ASIC_REV_5906)
16293 tg3_flag_set(tp, HW_TSO_2);
16294 else if (tg3_flag(tp, 5750_PLUS)) {
16295 tg3_flag_set(tp, HW_TSO_1);
16296 tg3_flag_set(tp, TSO_BUG);
16297 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16298 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16299 tg3_flag_clear(tp, TSO_BUG);
16300 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16301 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16302 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16303 tg3_flag_set(tp, FW_TSO);
16304 tg3_flag_set(tp, TSO_BUG);
16305 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16306 tp->fw_needed = FIRMWARE_TG3TSO5;
16307 else
16308 tp->fw_needed = FIRMWARE_TG3TSO;
16309 }
16311 /* Selectively allow TSO based on operating conditions */
16312 if (tg3_flag(tp, HW_TSO_1) ||
16313 tg3_flag(tp, HW_TSO_2) ||
16314 tg3_flag(tp, HW_TSO_3) ||
16315 tg3_flag(tp, FW_TSO)) {
16316 /* For firmware TSO, assume ASF is disabled.
16317 * We'll disable TSO later if we discover ASF
16318 * is enabled in tg3_get_eeprom_hw_cfg().
16319 */
16320 tg3_flag_set(tp, TSO_CAPABLE);
16321 } else {
16322 tg3_flag_clear(tp, TSO_CAPABLE);
16323 tg3_flag_clear(tp, TSO_BUG);
16324 tp->fw_needed = NULL;
16325 }
16327 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16328 tp->fw_needed = FIRMWARE_TG3;
16330 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16331 tp->fw_needed = FIRMWARE_TG357766;
16333 tp->irq_max = 1;
16335 if (tg3_flag(tp, 5750_PLUS)) {
16336 tg3_flag_set(tp, SUPPORT_MSI);
16337 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16338 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16339 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16340 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16341 tp->pdev_peer == tp->pdev))
16342 tg3_flag_clear(tp, SUPPORT_MSI);
16344 if (tg3_flag(tp, 5755_PLUS) ||
16345 tg3_asic_rev(tp) == ASIC_REV_5906) {
16346 tg3_flag_set(tp, 1SHOT_MSI);
16347 }
16349 if (tg3_flag(tp, 57765_PLUS)) {
16350 tg3_flag_set(tp, SUPPORT_MSIX);
16351 tp->irq_max = TG3_IRQ_MAX_VECS;
16352 }
16353 }
16355 tp->txq_max = 1;
16356 tp->rxq_max = 1;
16357 if (tp->irq_max > 1) {
16358 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16359 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16361 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16362 tg3_asic_rev(tp) == ASIC_REV_5720)
16363 tp->txq_max = tp->irq_max - 1;
16364 }
16366 if (tg3_flag(tp, 5755_PLUS) ||
16367 tg3_asic_rev(tp) == ASIC_REV_5906)
16368 tg3_flag_set(tp, SHORT_DMA_BUG);
16370 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16371 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
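/* Illustrative note (an assumption based on the constant's name): with
 * dma_limit capped at TG3_TX_BD_DMA_MAX_4K, the TX path must split any
 * buffer whose DMA mapping would exceed 4K per descriptor, so a 6K
 * fragment would occupy two BDs (e.g. 4K + 2K) instead of one.
 */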
16373 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16374 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16375 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16376 tg3_asic_rev(tp) == ASIC_REV_5762)
16377 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16379 if (tg3_flag(tp, 57765_PLUS) &&
16380 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16381 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16383 if (!tg3_flag(tp, 5705_PLUS) ||
16384 tg3_flag(tp, 5780_CLASS) ||
16385 tg3_flag(tp, USE_JUMBO_BDFLAG))
16386 tg3_flag_set(tp, JUMBO_CAPABLE);
16388 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16389 &pci_state_reg);
16391 if (pci_is_pcie(tp->pdev)) {
16392 u16 lnkctl;
16394 tg3_flag_set(tp, PCI_EXPRESS);
16396 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16397 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16398 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16399 tg3_flag_clear(tp, HW_TSO_2);
16400 tg3_flag_clear(tp, TSO_CAPABLE);
16401 }
16402 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16403 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16404 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16405 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16406 tg3_flag_set(tp, CLKREQ_BUG);
16407 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16408 tg3_flag_set(tp, L1PLLPD_EN);
16409 }
16410 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16411 /* BCM5785 devices are effectively PCIe devices, and should
16412 * follow PCIe codepaths, but do not have a PCIe capabilities
16413 * section.
16414 */
16415 tg3_flag_set(tp, PCI_EXPRESS);
16416 } else if (!tg3_flag(tp, 5705_PLUS) ||
16417 tg3_flag(tp, 5780_CLASS)) {
16418 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16419 if (!tp->pcix_cap) {
16420 dev_err(&tp->pdev->dev,
16421 "Cannot find PCI-X capability, aborting\n");
16425 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16426 tg3_flag_set(tp, PCIX_MODE);
16427 }
16429 /* If we have an AMD 762 or VIA K8T800 chipset, write
16430 * reordering to the mailbox registers done by the host
16431 * controller can cause major troubles. We read back from
16432 * every mailbox register write to force the writes to be
16433 * posted to the chip in order.
16434 */
16435 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16436 !tg3_flag(tp, PCI_EXPRESS))
16437 tg3_flag_set(tp, MBOX_WRITE_REORDER);
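/* Sketch (illustration only) of the read-back trick this flag enables:
 * a dummy read from the register just written forces the host bridge
 * to flush the posted write before any later write can pass it, e.g.:
 *
 *	writel(val, mbox);
 *	readl(mbox);	// defeats write reordering by the chipset
 */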
16439 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16440 &tp->pci_cacheline_sz);
16441 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16442 &tp->pci_lat_timer);
16443 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16444 tp->pci_lat_timer < 64) {
16445 tp->pci_lat_timer = 64;
16446 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16447 tp->pci_lat_timer);
16448 }
16450 /* Important! -- It is critical that the PCI-X hw workaround
16451 * situation is decided before the first MMIO register access.
16452 */
16453 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16454 /* 5700 BX chips need to have their TX producer index
16455 * mailboxes written twice to work around a bug.
16456 */
16457 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16459 /* If we are in PCI-X mode, enable register write workaround.
16461 * The workaround is to use indirect register accesses
16462 * for all chip writes not to mailbox registers.
16463 */
16464 if (tg3_flag(tp, PCIX_MODE)) {
16465 u32 pm_reg;
16467 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16469 /* The chip can have its power management PCI config
16470 * space registers clobbered due to this bug.
16471 * So explicitly force the chip into D0 here.
16472 */
16473 pci_read_config_dword(tp->pdev,
16474 tp->pdev->pm_cap + PCI_PM_CTRL,
16475 &pm_reg);
16476 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16477 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16478 pci_write_config_dword(tp->pdev,
16479 tp->pdev->pm_cap + PCI_PM_CTRL,
16480 pm_reg);
16482 /* Also, force SERR#/PERR# in PCI command. */
16483 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16484 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16485 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16486 }
16489 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16490 tg3_flag_set(tp, PCI_HIGH_SPEED);
16491 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16492 tg3_flag_set(tp, PCI_32BIT);
16494 /* Chip-specific fixup from Broadcom driver */
16495 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16496 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16497 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16498 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16499 }
16501 /* Default fast path register access methods */
16502 tp->read32 = tg3_read32;
16503 tp->write32 = tg3_write32;
16504 tp->read32_mbox = tg3_read32;
16505 tp->write32_mbox = tg3_write32;
16506 tp->write32_tx_mbox = tg3_write32;
16507 tp->write32_rx_mbox = tg3_write32;
16509 /* Various workaround register access methods */
16510 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16511 tp->write32 = tg3_write_indirect_reg32;
16512 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16513 (tg3_flag(tp, PCI_EXPRESS) &&
16514 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16515 /*
16516 * Back to back register writes can cause problems on these
16517 * chips, the workaround is to read back all reg writes
16518 * except those to mailbox regs.
16520 * See tg3_write_indirect_reg32().
16521 */
16522 tp->write32 = tg3_write_flush_reg32;
16523 }
16525 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16526 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16527 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16528 tp->write32_rx_mbox = tg3_write_flush_reg32;
16529 }
16531 if (tg3_flag(tp, ICH_WORKAROUND)) {
16532 tp->read32 = tg3_read_indirect_reg32;
16533 tp->write32 = tg3_write_indirect_reg32;
16534 tp->read32_mbox = tg3_read_indirect_mbox;
16535 tp->write32_mbox = tg3_write_indirect_mbox;
16536 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16537 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16539 iounmap(tp->regs);
16540 tp->regs = NULL;
16542 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16543 pci_cmd &= ~PCI_COMMAND_MEMORY;
16544 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16545 }
16546 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16547 tp->read32_mbox = tg3_read32_mbox_5906;
16548 tp->write32_mbox = tg3_write32_mbox_5906;
16549 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16550 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16551 }
16553 if (tp->write32 == tg3_write_indirect_reg32 ||
16554 (tg3_flag(tp, PCIX_MODE) &&
16555 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16556 tg3_asic_rev(tp) == ASIC_REV_5701)))
16557 tg3_flag_set(tp, SRAM_USE_CONFIG);
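/* Sketch (illustration only) of what SRAM_USE_CONFIG implies: SRAM
 * words are reached through the PCI config-space memory window rather
 * than MMIO, roughly:
 *
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 *	pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, &val);
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 *
 * The real helpers wrap this sequence in tp->indirect_lock.
 */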
16559 /* The memory arbiter has to be enabled in order for SRAM accesses
16560 * to succeed. Normally on powerup the tg3 chip firmware will make
16561 * sure it is enabled, but other entities such as system netboot
16562 * code might disable it.
16563 */
16564 val = tr32(MEMARB_MODE);
16565 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16567 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16568 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16569 tg3_flag(tp, 5780_CLASS)) {
16570 if (tg3_flag(tp, PCIX_MODE)) {
16571 pci_read_config_dword(tp->pdev,
16572 tp->pcix_cap + PCI_X_STATUS,
16574 tp->pci_fn = val & 0x7;
16575 }
16576 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16577 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16578 tg3_asic_rev(tp) == ASIC_REV_5720) {
16579 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16580 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16581 val = tr32(TG3_CPMU_STATUS);
16583 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16584 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16585 else
16586 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16587 TG3_CPMU_STATUS_FSHFT_5719;
16588 }
16590 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16591 tp->write32_tx_mbox = tg3_write_flush_reg32;
16592 tp->write32_rx_mbox = tg3_write_flush_reg32;
16595 /* Get eeprom hw config before calling tg3_set_power_state().
16596 * In particular, the TG3_FLAG_IS_NIC flag must be
16597 * determined before calling tg3_set_power_state() so that
16598 * we know whether or not to switch out of Vaux power.
16599 * When the flag is set, it means that GPIO1 is used for eeprom
16600 * write protect and also implies that it is a LOM where GPIOs
16601 * are not used to switch power.
16602 */
16603 tg3_get_eeprom_hw_cfg(tp);
16605 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16606 tg3_flag_clear(tp, TSO_CAPABLE);
16607 tg3_flag_clear(tp, TSO_BUG);
16608 tp->fw_needed = NULL;
16609 }
16611 if (tg3_flag(tp, ENABLE_APE)) {
16612 /* Allow reads and writes to the
16613 * APE register and memory space.
16614 */
16615 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16616 PCISTATE_ALLOW_APE_SHMEM_WR |
16617 PCISTATE_ALLOW_APE_PSPACE_WR;
16618 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16619 pci_state_reg);
16621 tg3_ape_lock_init(tp);
16622 tp->ape_hb_interval =
16623 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16624 }
16626 /* Set up tp->grc_local_ctrl before calling
16627 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16628 * will bring 5700's external PHY out of reset.
16629 * It is also used as eeprom write protect on LOMs.
16630 */
16631 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16632 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16633 tg3_flag(tp, EEPROM_WRITE_PROT))
16634 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16635 GRC_LCLCTRL_GPIO_OUTPUT1);
16636 /* Unused GPIO3 must be driven as output on 5752 because there
16637 * are no pull-up resistors on unused GPIO pins.
16638 */
16639 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16640 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16642 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16643 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16644 tg3_flag(tp, 57765_CLASS))
16645 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16647 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16649 /* Turn off the debug UART. */
16650 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16651 if (tg3_flag(tp, IS_NIC))
16652 /* Keep VMain power. */
16653 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16654 GRC_LCLCTRL_GPIO_OUTPUT0;
16655 }
16657 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16658 tp->grc_local_ctrl |=
16659 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16661 /* Switch out of Vaux if it is a NIC */
16662 tg3_pwrsrc_switch_to_vmain(tp);
16664 /* Derive initial jumbo mode from MTU assigned in
16665 * ether_setup() via the alloc_etherdev() call
16666 */
16667 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16668 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16670 /* Determine WakeOnLan speed to use. */
16671 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16672 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16673 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16674 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16675 tg3_flag_clear(tp, WOL_SPEED_100MB);
16676 } else {
16677 tg3_flag_set(tp, WOL_SPEED_100MB);
16678 }
16680 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16681 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16683 /* A few boards don't want Ethernet@WireSpeed phy feature */
16684 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16685 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16686 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16687 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16688 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16689 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16690 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16692 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16693 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16694 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16695 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16696 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16698 if (tg3_flag(tp, 5705_PLUS) &&
16699 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16700 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16701 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16702 !tg3_flag(tp, 57765_PLUS)) {
16703 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16704 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16705 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16706 tg3_asic_rev(tp) == ASIC_REV_5761) {
16707 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16708 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16709 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16710 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16711 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16712 } else
16713 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16714 }
16716 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16717 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16718 tp->phy_otp = tg3_read_otp_phycfg(tp);
16719 if (tp->phy_otp == 0)
16720 tp->phy_otp = TG3_OTP_DEFAULT;
16721 }
16723 if (tg3_flag(tp, CPMU_PRESENT))
16724 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16725 else
16726 tp->mi_mode = MAC_MI_MODE_BASE;
16728 tp->coalesce_mode = 0;
16729 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16730 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16731 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16733 /* Set these bits to enable statistics workaround. */
16734 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16735 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16736 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16737 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16738 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16739 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16740 }
16742 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16743 tg3_asic_rev(tp) == ASIC_REV_57780)
16744 tg3_flag_set(tp, USE_PHYLIB);
16746 err = tg3_mdio_init(tp);
16747 if (err)
16748 return err;
16750 /* Initialize data/descriptor byte/word swapping. */
16751 val = tr32(GRC_MODE);
16752 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16753 tg3_asic_rev(tp) == ASIC_REV_5762)
16754 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16755 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16756 GRC_MODE_B2HRX_ENABLE |
16757 GRC_MODE_HTX2B_ENABLE |
16758 GRC_MODE_HOST_STACKUP);
16759 else
16760 val &= GRC_MODE_HOST_STACKUP;
16762 tw32(GRC_MODE, val | tp->grc_mode);
16764 tg3_switch_clocks(tp);
16766 /* Clear this out for sanity. */
16767 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16769 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16770 tw32(TG3PCI_REG_BASE_ADDR, 0);
16772 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16773 &pci_state_reg);
16774 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16775 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16776 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16777 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16778 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16779 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16780 void __iomem *sram_base;
16782 /* Write some dummy words into the SRAM status block
16783 * area, see if it reads back correctly. If the return
16784 * value is bad, force enable the PCIX workaround.
16785 */
16786 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16788 writel(0x00000000, sram_base);
16789 writel(0x00000000, sram_base + 4);
16790 writel(0xffffffff, sram_base + 4);
16791 if (readl(sram_base) != 0x00000000)
16792 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16793 }
16794 }
16797 tg3_nvram_init(tp);
16799 /* If the device has an NVRAM, no need to load patch firmware */
16800 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16801 !tg3_flag(tp, NO_NVRAM))
16802 tp->fw_needed = NULL;
16804 grc_misc_cfg = tr32(GRC_MISC_CFG);
16805 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16807 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16808 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16809 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16810 tg3_flag_set(tp, IS_5788);
16812 if (!tg3_flag(tp, IS_5788) &&
16813 tg3_asic_rev(tp) != ASIC_REV_5700)
16814 tg3_flag_set(tp, TAGGED_STATUS);
16815 if (tg3_flag(tp, TAGGED_STATUS)) {
16816 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16817 HOSTCC_MODE_CLRTICK_TXBD);
16819 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16820 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16821 tp->misc_host_ctrl);
16822 }
16824 /* Preserve the APE MAC_MODE bits */
16825 if (tg3_flag(tp, ENABLE_APE))
16826 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16827 else
16828 tp->mac_mode = 0;
16830 if (tg3_10_100_only_device(tp, ent))
16831 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16833 err = tg3_phy_probe(tp);
16835 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16836 /* ... but do not return immediately ... */
16841 tg3_read_fw_ver(tp);
16843 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16844 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16845 } else {
16846 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16847 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16848 else
16849 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16850 }
16852 /* 5700 {AX,BX} chips have a broken status block link
16853 * change bit implementation, so we must use the
16854 * status register in those cases.
16855 */
16856 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16857 tg3_flag_set(tp, USE_LINKCHG_REG);
16858 else
16859 tg3_flag_clear(tp, USE_LINKCHG_REG);
16861 /* The led_ctrl is set during tg3_phy_probe, here we might
16862 * have to force the link status polling mechanism based
16863 * upon subsystem IDs.
16864 */
16865 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16866 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16867 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16868 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16869 tg3_flag_set(tp, USE_LINKCHG_REG);
16870 }
16872 /* For all SERDES we poll the MAC status register. */
16873 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16874 tg3_flag_set(tp, POLL_SERDES);
16875 else
16876 tg3_flag_clear(tp, POLL_SERDES);
16878 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16879 tg3_flag_set(tp, POLL_CPMU_LINK);
16881 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16882 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16883 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16884 tg3_flag(tp, PCIX_MODE)) {
16885 tp->rx_offset = NET_SKB_PAD;
16886 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16887 tp->rx_copy_thresh = ~(u16)0;
16888 #endif
16889 }
16891 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16892 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16893 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16895 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16897 /* Increment the rx prod index on the rx std ring by at most
16898 * 8 for these chips to workaround hw errata.
16899 */
16900 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16901 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16902 tg3_asic_rev(tp) == ASIC_REV_5755)
16903 tp->rx_std_max_post = 8;
16905 if (tg3_flag(tp, ASPM_WORKAROUND))
16906 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16907 PCIE_PWR_MGMT_L1_THRESH_MSK;
16909 return err;
16910 }
16912 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16913 {
16914 u32 hi, lo, mac_offset;
16915 int addr_ok = 0;
16916 int err;
16918 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16919 return 0;
16921 if (tg3_flag(tp, IS_SSB_CORE)) {
16922 err = ssb_gige_get_macaddr(tp->pdev, addr);
16923 if (!err && is_valid_ether_addr(addr))
16924 return 0;
16925 }
16927 mac_offset = 0x7c;
16928 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16929 tg3_flag(tp, 5780_CLASS)) {
16930 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16931 mac_offset = 0xcc;
16932 if (tg3_nvram_lock(tp))
16933 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16934 else
16935 tg3_nvram_unlock(tp);
16936 } else if (tg3_flag(tp, 5717_PLUS)) {
16937 if (tp->pci_fn & 1)
16938 mac_offset = 0xcc;
16939 if (tp->pci_fn > 1)
16940 mac_offset += 0x18c;
16941 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16942 mac_offset = 0x10;
16944 /* First try to get it from MAC address mailbox. */
16945 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16946 if ((hi >> 16) == 0x484b) {
16947 addr[0] = (hi >> 8) & 0xff;
16948 addr[1] = (hi >> 0) & 0xff;
16950 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16951 addr[2] = (lo >> 24) & 0xff;
16952 addr[3] = (lo >> 16) & 0xff;
16953 addr[4] = (lo >> 8) & 0xff;
16954 addr[5] = (lo >> 0) & 0xff;
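/* Worked example (added): 0x484b is ASCII "HK", the bootcode's
 * sentinel. If the mailbox held hi = 0x484b0a1b and lo = 0x2c3d4e5f,
 * the unpacking above yields the MAC address 0a:1b:2c:3d:4e:5f.
 */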
16956 /* Some old bootcode may report a 0 MAC address in SRAM */
16957 addr_ok = is_valid_ether_addr(addr);
16958 }
16959 if (!addr_ok) {
16960 /* Next, try NVRAM. */
16961 if (!tg3_flag(tp, NO_NVRAM) &&
16962 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16963 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16964 memcpy(&addr[0], ((char *)&hi) + 2, 2);
16965 memcpy(&addr[2], (char *)&lo, sizeof(lo));
16966 }
16967 /* Finally just fetch it out of the MAC control regs. */
16968 else {
16969 hi = tr32(MAC_ADDR_0_HIGH);
16970 lo = tr32(MAC_ADDR_0_LOW);
16972 addr[5] = lo & 0xff;
16973 addr[4] = (lo >> 8) & 0xff;
16974 addr[3] = (lo >> 16) & 0xff;
16975 addr[2] = (lo >> 24) & 0xff;
16976 addr[1] = hi & 0xff;
16977 addr[0] = (hi >> 8) & 0xff;
16978 }
16979 }
16981 if (!is_valid_ether_addr(addr))
16982 return -EINVAL;
16983 return 0;
16984 }
16986 #define BOUNDARY_SINGLE_CACHELINE 1
16987 #define BOUNDARY_MULTI_CACHELINE 2
16989 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16990 {
16991 int cacheline_size;
16992 u8 byte;
16993 int goal;
16995 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16996 if (byte == 0)
16997 cacheline_size = 1024;
16998 else
16999 cacheline_size = (int) byte * 4;
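/* Worked example (added): PCI_CACHE_LINE_SIZE counts 32-bit words, so
 * a register value of 0x10 means 16 * 4 = 64 bytes, while a value of
 * 0 is treated as the 1024-byte worst case above.
 */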
17001 /* On 5703 and later chips, the boundary bits have no
17002 * effect.
17003 */
17004 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17005 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17006 !tg3_flag(tp, PCI_EXPRESS))
17007 goto out;
17009 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17010 goal = BOUNDARY_MULTI_CACHELINE;
17011 #else
17012 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17013 goal = BOUNDARY_SINGLE_CACHELINE;
17014 #else
17015 goal = 0;
17016 #endif
17017 #endif
17019 if (tg3_flag(tp, 57765_PLUS)) {
17020 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17021 goto out;
17022 }
17024 if (!goal)
17025 goto out;
17027 /* PCI controllers on most RISC systems tend to disconnect
17028 * when a device tries to burst across a cache-line boundary.
17029 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17031 * Unfortunately, for PCI-E there are only limited
17032 * write-side controls for this, and thus for reads
17033 * we will still get the disconnects. We'll also waste
17034 * these PCI cycles for both read and write for chips
17035 * other than 5700 and 5701 which do not implement the
17036 * boundary bits.
17037 */
17038 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17039 switch (cacheline_size) {
17040 case 16:
17041 case 32:
17042 case 64:
17043 case 128:
17044 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17045 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17046 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17048 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17049 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17050 }
17051 break;
17053 case 256:
17054 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17055 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17056 break;
17058 default:
17059 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17060 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17061 break;
17062 }
17063 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17064 switch (cacheline_size) {
17065 case 16:
17066 case 32:
17067 case 64:
17068 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17069 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17070 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17071 break;
17072 }
17073 fallthrough;
17074 case 256:
17075 default:
17076 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17077 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17078 break;
17079 }
17080 } else {
17081 switch (cacheline_size) {
17082 case 16:
17083 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17084 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17085 DMA_RWCTRL_WRITE_BNDRY_16);
17086 break;
17087 }
17088 fallthrough;
17089 case 32:
17090 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17091 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17092 DMA_RWCTRL_WRITE_BNDRY_32);
17093 break;
17094 }
17095 fallthrough;
17096 case 64:
17097 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17098 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17099 DMA_RWCTRL_WRITE_BNDRY_64);
17100 break;
17101 }
17102 fallthrough;
17103 case 128:
17104 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17105 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17106 DMA_RWCTRL_WRITE_BNDRY_128);
17107 break;
17108 }
17109 fallthrough;
17110 case 256:
17111 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17112 DMA_RWCTRL_WRITE_BNDRY_256);
17113 break;
17114 case 512:
17115 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17116 DMA_RWCTRL_WRITE_BNDRY_512);
17117 break;
17118 case 1024:
17119 default:
17120 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17121 DMA_RWCTRL_WRITE_BNDRY_1024);
17122 break;
17123 }
17124 }
17126 out:
17127 return val;
17128 }
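/* Worked example (illustration only): a 5701 in conventional PCI mode
 * with a 64-byte cache line and goal == BOUNDARY_SINGLE_CACHELINE takes
 * the "case 64" arm above and returns val with DMA_RWCTRL_READ_BNDRY_64
 * and DMA_RWCTRL_WRITE_BNDRY_64 set, stopping bursts at each cache line.
 */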
17130 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17131 int size, bool to_device)
17132 {
17133 struct tg3_internal_buffer_desc test_desc;
17134 u32 sram_dma_descs;
17135 int i, ret;
17137 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17139 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17140 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17141 tw32(RDMAC_STATUS, 0);
17142 tw32(WDMAC_STATUS, 0);
17144 tw32(BUFMGR_MODE, 0);
17145 tw32(FTQ_RESET, 0);
17147 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17148 test_desc.addr_lo = buf_dma & 0xffffffff;
17149 test_desc.nic_mbuf = 0x00002100;
17150 test_desc.len = size;
17152 /*
17153 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17154 * the *second* time the tg3 driver was getting loaded after an
17155 * initial scan.
17157 * Broadcom tells me:
17158 * ...the DMA engine is connected to the GRC block and a DMA
17159 * reset may affect the GRC block in some unpredictable way...
17160 * The behavior of resets to individual blocks has not been tested.
17162 * Broadcom noted the GRC reset will also reset all sub-components.
17163 */
17164 if (to_device) {
17165 test_desc.cqid_sqid = (13 << 8) | 2;
17167 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17168 udelay(40);
17169 } else {
17170 test_desc.cqid_sqid = (16 << 8) | 7;
17172 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17173 udelay(40);
17174 }
17175 test_desc.flags = 0x00000005;
17177 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17178 u32 val;
17180 val = *(((u32 *)&test_desc) + i);
17181 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17182 sram_dma_descs + (i * sizeof(u32)));
17183 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17184 }
17185 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17187 if (to_device)
17188 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17189 else
17190 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17192 ret = -ENODEV;
17193 for (i = 0; i < 40; i++) {
17194 u32 val;
17196 if (to_device)
17197 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17198 else
17199 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17200 if ((val & 0xffff) == sram_dma_descs) {
17201 ret = 0;
17202 break;
17203 }
17205 udelay(100);
17206 }
17208 return ret;
17209 }
17211 #define TEST_BUFFER_SIZE 0x2000
17213 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17214 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17215 { },
17216 };
17218 static int tg3_test_dma(struct tg3 *tp)
17219 {
17220 dma_addr_t buf_dma;
17221 u32 *buf, saved_dma_rwctrl;
17222 int ret = 0;
17224 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17225 &buf_dma, GFP_KERNEL);
17226 if (!buf) {
17227 ret = -ENOMEM;
17228 goto out_nofree;
17229 }
17231 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17232 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17234 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17236 if (tg3_flag(tp, 57765_PLUS))
17237 goto out;
17239 if (tg3_flag(tp, PCI_EXPRESS)) {
17240 /* DMA read watermark not used on PCIE */
17241 tp->dma_rwctrl |= 0x00180000;
17242 } else if (!tg3_flag(tp, PCIX_MODE)) {
17243 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17244 tg3_asic_rev(tp) == ASIC_REV_5750)
17245 tp->dma_rwctrl |= 0x003f0000;
17247 tp->dma_rwctrl |= 0x003f000f;
17248 } else {
17249 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17250 tg3_asic_rev(tp) == ASIC_REV_5704) {
17251 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17252 u32 read_water = 0x7;
17254 /* If the 5704 is behind the EPB bridge, we can
17255 * do the less restrictive ONE_DMA workaround for
17256 * better performance.
17257 */
17258 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17259 tg3_asic_rev(tp) == ASIC_REV_5704)
17260 tp->dma_rwctrl |= 0x8000;
17261 else if (ccval == 0x6 || ccval == 0x7)
17262 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17264 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17265 read_water = 4;
17266 /* Set bit 23 to enable PCIX hw bug fix */
17267 tp->dma_rwctrl |=
17268 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17269 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17270 (1 << 23);
17271 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17272 /* 5780 always in PCIX mode */
17273 tp->dma_rwctrl |= 0x00144000;
17274 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17275 /* 5714 always in PCIX mode */
17276 tp->dma_rwctrl |= 0x00148000;
17278 tp->dma_rwctrl |= 0x001b000f;
17279 }
17281 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17282 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17284 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17285 tg3_asic_rev(tp) == ASIC_REV_5704)
17286 tp->dma_rwctrl &= 0xfffffff0;
17288 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17289 tg3_asic_rev(tp) == ASIC_REV_5701) {
17290 /* Remove this if it causes problems for some boards. */
17291 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17293 /* On 5700/5701 chips, we need to set this bit.
17294 * Otherwise the chip will issue cacheline transactions
17295 * to streamable DMA memory with not all the byte
17296 * enables turned on. This is an error on several
17297 * RISC PCI controllers, in particular sparc64.
17299 * On 5703/5704 chips, this bit has been reassigned
17300 * a different meaning. In particular, it is used
17301 * on those chips to enable a PCI-X workaround.
17302 */
17303 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17304 }
17306 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17309 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17310 tg3_asic_rev(tp) != ASIC_REV_5701)
17311 goto out;
17313 /* It is best to perform DMA test with maximum write burst size
17314 * to expose the 5700/5701 write DMA bug.
17315 */
17316 saved_dma_rwctrl = tp->dma_rwctrl;
17317 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17318 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17320 while (1) {
17321 u32 *p = buf, i;
17323 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17324 p[i] = i;
17326 /* Send the buffer to the chip. */
17327 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17328 if (ret) {
17329 dev_err(&tp->pdev->dev,
17330 "%s: Buffer write failed. err = %d\n",
17331 __func__, ret);
17332 break;
17333 }
17335 /* Now read it back. */
17336 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17337 if (ret) {
17338 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17339 "err = %d\n", __func__, ret);
17340 break;
17341 }
17343 /* Verify it. */
17344 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17345 if (p[i] == i)
17346 continue;
17348 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17349 DMA_RWCTRL_WRITE_BNDRY_16) {
17350 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17351 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17352 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17353 break;
17354 } else {
17355 dev_err(&tp->pdev->dev,
17356 "%s: Buffer corrupted on read back! "
17357 "(%d != %d)\n", __func__, p[i], i);
17363 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17364 /* Success. */
17365 ret = 0;
17366 break;
17367 }
17368 }
17369 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17370 DMA_RWCTRL_WRITE_BNDRY_16) {
17371 /* DMA test passed without adjusting DMA boundary,
17372 * now look for chipsets that are known to expose the
17373 * DMA bug without failing the test.
17375 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17376 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17377 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17378 } else {
17379 /* Safe to use the calculated DMA boundary. */
17380 tp->dma_rwctrl = saved_dma_rwctrl;
17381 }
17383 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17384 }
17386 out:
17387 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17388 out_nofree:
17389 return ret;
17390 }
17392 static void tg3_init_bufmgr_config(struct tg3 *tp)
17393 {
17394 if (tg3_flag(tp, 57765_PLUS)) {
17395 tp->bufmgr_config.mbuf_read_dma_low_water =
17396 DEFAULT_MB_RDMA_LOW_WATER_5705;
17397 tp->bufmgr_config.mbuf_mac_rx_low_water =
17398 DEFAULT_MB_MACRX_LOW_WATER_57765;
17399 tp->bufmgr_config.mbuf_high_water =
17400 DEFAULT_MB_HIGH_WATER_57765;
17402 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17403 DEFAULT_MB_RDMA_LOW_WATER_5705;
17404 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17405 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17406 tp->bufmgr_config.mbuf_high_water_jumbo =
17407 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17408 } else if (tg3_flag(tp, 5705_PLUS)) {
17409 tp->bufmgr_config.mbuf_read_dma_low_water =
17410 DEFAULT_MB_RDMA_LOW_WATER_5705;
17411 tp->bufmgr_config.mbuf_mac_rx_low_water =
17412 DEFAULT_MB_MACRX_LOW_WATER_5705;
17413 tp->bufmgr_config.mbuf_high_water =
17414 DEFAULT_MB_HIGH_WATER_5705;
17415 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17416 tp->bufmgr_config.mbuf_mac_rx_low_water =
17417 DEFAULT_MB_MACRX_LOW_WATER_5906;
17418 tp->bufmgr_config.mbuf_high_water =
17419 DEFAULT_MB_HIGH_WATER_5906;
17420 }
17422 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17423 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17424 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17425 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17426 tp->bufmgr_config.mbuf_high_water_jumbo =
17427 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17428 } else {
17429 tp->bufmgr_config.mbuf_read_dma_low_water =
17430 DEFAULT_MB_RDMA_LOW_WATER;
17431 tp->bufmgr_config.mbuf_mac_rx_low_water =
17432 DEFAULT_MB_MACRX_LOW_WATER;
17433 tp->bufmgr_config.mbuf_high_water =
17434 DEFAULT_MB_HIGH_WATER;
17436 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17437 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17438 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17439 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17440 tp->bufmgr_config.mbuf_high_water_jumbo =
17441 DEFAULT_MB_HIGH_WATER_JUMBO;
17442 }
17444 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17445 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17446 }
17448 static char *tg3_phy_string(struct tg3 *tp)
17449 {
17450 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17451 case TG3_PHY_ID_BCM5400: return "5400";
17452 case TG3_PHY_ID_BCM5401: return "5401";
17453 case TG3_PHY_ID_BCM5411: return "5411";
17454 case TG3_PHY_ID_BCM5701: return "5701";
17455 case TG3_PHY_ID_BCM5703: return "5703";
17456 case TG3_PHY_ID_BCM5704: return "5704";
17457 case TG3_PHY_ID_BCM5705: return "5705";
17458 case TG3_PHY_ID_BCM5750: return "5750";
17459 case TG3_PHY_ID_BCM5752: return "5752";
17460 case TG3_PHY_ID_BCM5714: return "5714";
17461 case TG3_PHY_ID_BCM5780: return "5780";
17462 case TG3_PHY_ID_BCM5755: return "5755";
17463 case TG3_PHY_ID_BCM5787: return "5787";
17464 case TG3_PHY_ID_BCM5784: return "5784";
17465 case TG3_PHY_ID_BCM5756: return "5722/5756";
17466 case TG3_PHY_ID_BCM5906: return "5906";
17467 case TG3_PHY_ID_BCM5761: return "5761";
17468 case TG3_PHY_ID_BCM5718C: return "5718C";
17469 case TG3_PHY_ID_BCM5718S: return "5718S";
17470 case TG3_PHY_ID_BCM57765: return "57765";
17471 case TG3_PHY_ID_BCM5719C: return "5719C";
17472 case TG3_PHY_ID_BCM5720C: return "5720C";
17473 case TG3_PHY_ID_BCM5762: return "5762C";
17474 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17475 case 0: return "serdes";
17476 default: return "unknown";
17480 static char *tg3_bus_string(struct tg3 *tp, char *str)
17481 {
17482 if (tg3_flag(tp, PCI_EXPRESS)) {
17483 strcpy(str, "PCI Express");
17485 } else if (tg3_flag(tp, PCIX_MODE)) {
17486 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17488 strcpy(str, "PCIX:");
17490 if ((clock_ctrl == 7) ||
17491 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17492 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17493 strcat(str, "133MHz");
17494 else if (clock_ctrl == 0)
17495 strcat(str, "33MHz");
17496 else if (clock_ctrl == 2)
17497 strcat(str, "50MHz");
17498 else if (clock_ctrl == 4)
17499 strcat(str, "66MHz");
17500 else if (clock_ctrl == 6)
17501 strcat(str, "100MHz");
17503 strcpy(str, "PCI:");
17504 if (tg3_flag(tp, PCI_HIGH_SPEED))
17505 strcat(str, "66MHz");
17507 strcat(str, "33MHz");
17509 if (tg3_flag(tp, PCI_32BIT))
17510 strcat(str, ":32-bit");
17512 strcat(str, ":64-bit");
17516 static void tg3_init_coal(struct tg3 *tp)
17517 {
17518 struct ethtool_coalesce *ec = &tp->coal;
17520 memset(ec, 0, sizeof(*ec));
17521 ec->cmd = ETHTOOL_GCOALESCE;
17522 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17523 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17524 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17525 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17526 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17527 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17528 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17529 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17530 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17532 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17533 HOSTCC_MODE_CLRTICK_TXBD)) {
17534 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17535 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17536 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17537 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17538 }
17540 if (tg3_flag(tp, 5705_PLUS)) {
17541 ec->rx_coalesce_usecs_irq = 0;
17542 ec->tx_coalesce_usecs_irq = 0;
17543 ec->stats_block_coalesce_usecs = 0;
17544 }
17545 }
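/* Usage note (illustration only; "eth0" is a placeholder name): these
 * defaults are what the ethtool coalescing interface reports and
 * adjusts, e.g.:
 *
 *	ethtool -c eth0               # show rx-usecs etc. from tp->coal
 *	ethtool -C eth0 rx-usecs 40   # update ec->rx_coalesce_usecs
 */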
17547 static int tg3_init_one(struct pci_dev *pdev,
17548 const struct pci_device_id *ent)
17549 {
17550 struct net_device *dev;
17551 struct tg3 *tp;
17552 int i, err;
17553 u32 sndmbx, rcvmbx, intmbx;
17554 char str[40];
17555 u64 dma_mask, persist_dma_mask;
17556 netdev_features_t features = 0;
17557 u8 addr[ETH_ALEN] __aligned(2);
17559 err = pci_enable_device(pdev);
17561 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17565 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17567 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17568 goto err_out_disable_pdev;
17571 pci_set_master(pdev);
17573 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17574 if (!dev) {
17575 err = -ENOMEM;
17576 goto err_out_free_res;
17577 }
17579 SET_NETDEV_DEV(dev, &pdev->dev);
17581 tp = netdev_priv(dev);
17582 tp->pdev = pdev;
17583 tp->dev = dev;
17584 tp->rx_mode = TG3_DEF_RX_MODE;
17585 tp->tx_mode = TG3_DEF_TX_MODE;
17587 tp->pcierr_recovery = false;
17589 if (tg3_debug > 0)
17590 tp->msg_enable = tg3_debug;
17591 else
17592 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17594 if (pdev_is_ssb_gige_core(pdev)) {
17595 tg3_flag_set(tp, IS_SSB_CORE);
17596 if (ssb_gige_must_flush_posted_writes(pdev))
17597 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17598 if (ssb_gige_one_dma_at_once(pdev))
17599 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17600 if (ssb_gige_have_roboswitch(pdev)) {
17601 tg3_flag_set(tp, USE_PHYLIB);
17602 tg3_flag_set(tp, ROBOSWITCH);
17603 }
17604 if (ssb_gige_is_rgmii(pdev))
17605 tg3_flag_set(tp, RGMII_MODE);
17606 }
17608 /* The word/byte swap controls here control register access byte
17609 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17610 * setting below.
17611 */
17612 tp->misc_host_ctrl =
17613 MISC_HOST_CTRL_MASK_PCI_INT |
17614 MISC_HOST_CTRL_WORD_SWAP |
17615 MISC_HOST_CTRL_INDIR_ACCESS |
17616 MISC_HOST_CTRL_PCISTATE_RW;
17618 /* The NONFRM (non-frame) byte/word swap controls take effect
17619 * on descriptor entries, anything which isn't packet data.
17621 * The StrongARM chips on the board (one for tx, one for rx)
17622 * are running in big-endian mode.
17624 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17625 GRC_MODE_WSWAP_NONFRM_DATA);
17626 #ifdef __BIG_ENDIAN
17627 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17628 #endif
17629 spin_lock_init(&tp->lock);
17630 spin_lock_init(&tp->indirect_lock);
17631 INIT_WORK(&tp->reset_task, tg3_reset_task);
17633 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17635 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17637 goto err_out_free_dev;
17640 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17641 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17642 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17643 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17644 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17645 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17646 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17647 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17648 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17650 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17651 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17652 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17653 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17654 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17655 tg3_flag_set(tp, ENABLE_APE);
17656 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17657 if (!tp->aperegs) {
17658 dev_err(&pdev->dev,
17659 "Cannot map APE registers, aborting\n");
17660 err = -ENOMEM;
17661 goto err_out_iounmap;
17662 }
17663 }
17665 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17666 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17668 dev->ethtool_ops = &tg3_ethtool_ops;
17669 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17670 dev->netdev_ops = &tg3_netdev_ops;
17671 dev->irq = pdev->irq;
17673 err = tg3_get_invariants(tp, ent);
17674 if (err) {
17675 dev_err(&pdev->dev,
17676 "Problem fetching invariants of chip, aborting\n");
17677 goto err_out_apeunmap;
17678 }
17680 /* The EPB bridge inside 5714, 5715, and 5780 and any
17681 * device behind the EPB cannot support DMA addresses > 40-bit.
17682 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17683 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17684 * do DMA address check in tg3_start_xmit().
17685 */
17686 if (tg3_flag(tp, IS_5788))
17687 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17688 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17689 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17690 #ifdef CONFIG_HIGHMEM
17691 dma_mask = DMA_BIT_MASK(64);
17692 #endif
17693 } else
17694 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
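/* Illustrative note (added): persist_dma_mask bounds the coherent
 * (descriptor ring) allocations, which must stay reachable by the
 * bridge at all times, while dma_mask may be wider under
 * CONFIG_HIGHMEM because streaming buffers above 40 bits can still be
 * caught by the address check in tg3_start_xmit() mentioned above.
 */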
17696 /* Configure DMA attributes. */
17697 if (dma_mask > DMA_BIT_MASK(32)) {
17698 err = dma_set_mask(&pdev->dev, dma_mask);
17699 if (!err) {
17700 features |= NETIF_F_HIGHDMA;
17701 err = dma_set_coherent_mask(&pdev->dev,
17702 persist_dma_mask);
17703 if (err < 0) {
17704 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17705 "DMA for consistent allocations\n");
17706 goto err_out_apeunmap;
17707 }
17708 }
17709 }
17710 if (err || dma_mask == DMA_BIT_MASK(32)) {
17711 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17712 if (err) {
17713 dev_err(&pdev->dev,
17714 "No usable DMA configuration, aborting\n");
17715 goto err_out_apeunmap;
17716 }
17717 }
17719 tg3_init_bufmgr_config(tp);
17721 /* 5700 B0 chips do not support checksumming correctly due
17722 * to hardware bugs.
17723 */
17724 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17725 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17727 if (tg3_flag(tp, 5755_PLUS))
17728 features |= NETIF_F_IPV6_CSUM;
17729 }
17731 /* TSO is on by default on chips that support hardware TSO.
17732 * Firmware TSO on older chips gives lower performance, so it
17733 * is off by default, but can be enabled using ethtool.
17734 */
17735 if ((tg3_flag(tp, HW_TSO_1) ||
17736 tg3_flag(tp, HW_TSO_2) ||
17737 tg3_flag(tp, HW_TSO_3)) &&
17738 (features & NETIF_F_IP_CSUM))
17739 features |= NETIF_F_TSO;
17740 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17741 if (features & NETIF_F_IPV6_CSUM)
17742 features |= NETIF_F_TSO6;
17743 if (tg3_flag(tp, HW_TSO_3) ||
17744 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17745 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17746 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17747 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17748 tg3_asic_rev(tp) == ASIC_REV_57780)
17749 features |= NETIF_F_TSO_ECN;
17750 }
17752 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17753 NETIF_F_HW_VLAN_CTAG_RX;
17754 dev->vlan_features |= features;
17756 /*
17757 * Add loopback capability only for a subset of devices that support
17758 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17759 * loopback for the remaining devices.
17760 */
17761 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17762 !tg3_flag(tp, CPMU_PRESENT))
17763 /* Add the loopback capability */
17764 features |= NETIF_F_LOOPBACK;
17766 dev->hw_features |= features;
17767 dev->priv_flags |= IFF_UNICAST_FLT;
17769 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17770 dev->min_mtu = TG3_MIN_MTU;
17771 dev->max_mtu = TG3_MAX_MTU(tp);
17773 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17774 !tg3_flag(tp, TSO_CAPABLE) &&
17775 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17776 tg3_flag_set(tp, MAX_RXPEND_64);
17777 tp->rx_pending = 63;
17778 }
17780 err = tg3_get_device_address(tp, addr);
17781 if (err) {
17782 dev_err(&pdev->dev,
17783 "Could not obtain valid ethernet address, aborting\n");
17784 goto err_out_apeunmap;
17785 }
17786 eth_hw_addr_set(dev, addr);
17788 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17789 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17790 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17791 for (i = 0; i < tp->irq_max; i++) {
17792 struct tg3_napi *tnapi = &tp->napi[i];
17794 tnapi->tp = tp;
17795 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17797 tnapi->int_mbox = intmbx;
17798 if (i <= 4)
17799 intmbx += 0x8;
17800 else
17801 intmbx += 0x4;
17803 tnapi->consmbox = rcvmbx;
17804 tnapi->prodmbox = sndmbx;
17806 if (i)
17807 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17808 else
17809 tnapi->coal_now = HOSTCC_MODE_NOW;
17811 if (!tg3_flag(tp, SUPPORT_MSIX))
17812 break;
17814 /*
17815 * If we support MSIX, we'll be using RSS. If we're using
17816 * RSS, the first vector only handles link interrupts and the
17817 * remaining vectors handle rx and tx interrupts. Reuse the
17818 * mailbox values for the next iteration. The values we setup
17819 * above are still useful for the single vectored mode.
17820 */
17821 if (!i)
17822 continue;
17824 rcvmbx += 0x8;
17826 if (sndmbx & 0x4)
17827 sndmbx -= 0x4;
17828 else
17829 sndmbx += 0xc;
17830 }
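/* Worked example (added): with the strides above, extra MSI-X vectors
 * see receive-return mailboxes at rcvmbx + 0x8, + 0x10, + 0x18, ...,
 * while sndmbx alternates between -0x4 and +0xc, a net +0x8 for every
 * two vectors, keeping each producer mailbox on a distinct register.
 */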
17832 /*
17833 * Reset chip in case UNDI or EFI driver did not shut it down
17834 * cleanly. Otherwise the DMA self test will enable WDMAC and
17835 * we'll see (spurious) pending DMA on the PCI bus at that point.
17836 */
17837 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17838 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17839 tg3_full_lock(tp, 0);
17840 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17841 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17842 tg3_full_unlock(tp);
17843 }
17845 err = tg3_test_dma(tp);
17846 if (err) {
17847 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17848 goto err_out_apeunmap;
17849 }
17851 tg3_init_coal(tp);
17853 pci_set_drvdata(pdev, dev);
17855 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17856 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17857 tg3_asic_rev(tp) == ASIC_REV_5762)
17858 tg3_flag_set(tp, PTP_CAPABLE);
17860 tg3_timer_init(tp);
17862 tg3_carrier_off(tp);
17864 err = register_netdev(dev);
17865 if (err) {
17866 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17867 goto err_out_apeunmap;
17868 }
17870 if (tg3_flag(tp, PTP_CAPABLE)) {
17871 tg3_ptp_init(tp);
17872 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17873 &tp->pdev->dev);
17874 if (IS_ERR(tp->ptp_clock))
17875 tp->ptp_clock = NULL;
17876 }
17878 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17879 tp->board_part_number,
17880 tg3_chip_rev_id(tp),
17881 tg3_bus_string(tp, str),
17882 dev->dev_addr);
17884 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17885 char *ethtype;
17887 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17888 ethtype = "10/100Base-TX";
17889 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17890 ethtype = "1000Base-SX";
17892 ethtype = "10/100/1000Base-T";
17894 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17895 "(WireSpeed[%d], EEE[%d])\n",
17896 tg3_phy_string(tp), ethtype,
17897 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17898 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17899 }
17901 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17902 (dev->features & NETIF_F_RXCSUM) != 0,
17903 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17904 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17905 tg3_flag(tp, ENABLE_ASF) != 0,
17906 tg3_flag(tp, TSO_CAPABLE) != 0);
17907 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17909 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17910 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17912 pci_save_state(pdev);
17914 return 0;
17916 err_out_apeunmap:
17917 if (tp->aperegs) {
17918 iounmap(tp->aperegs);
17919 tp->aperegs = NULL;
17920 }
17922 err_out_iounmap:
17923 if (tp->regs) {
17924 iounmap(tp->regs);
17925 tp->regs = NULL;
17926 }
17928 err_out_free_dev:
17929 free_netdev(dev);
17931 err_out_free_res:
17932 pci_release_regions(pdev);
17934 err_out_disable_pdev:
17935 if (pci_is_enabled(pdev))
17936 pci_disable_device(pdev);
17937 return err;
17938 }
17940 static void tg3_remove_one(struct pci_dev *pdev)
17941 {
17942 struct net_device *dev = pci_get_drvdata(pdev);
17944 if (dev) {
17945 struct tg3 *tp = netdev_priv(dev);
17949 release_firmware(tp->fw);
17951 tg3_reset_task_cancel(tp);
17953 if (tg3_flag(tp, USE_PHYLIB)) {
17954 tg3_phy_fini(tp);
17955 tg3_mdio_fini(tp);
17956 }
17958 unregister_netdev(dev);
17959 if (tp->aperegs) {
17960 iounmap(tp->aperegs);
17961 tp->aperegs = NULL;
17962 }
17963 if (tp->regs) {
17964 iounmap(tp->regs);
17965 tp->regs = NULL;
17966 }
17967 free_netdev(dev);
17968 pci_release_regions(pdev);
17969 pci_disable_device(pdev);
17970 }
17971 }
17973 #ifdef CONFIG_PM_SLEEP
17974 static int tg3_suspend(struct device *device)
17975 {
17976 struct net_device *dev = dev_get_drvdata(device);
17977 struct tg3 *tp = netdev_priv(dev);
17978 int err = 0;
17980 rtnl_lock();
17982 if (!netif_running(dev))
17983 goto unlock;
17985 tg3_reset_task_cancel(tp);
17986 tg3_phy_stop(tp);
17987 tg3_netif_stop(tp);
17989 tg3_timer_stop(tp);
17991 tg3_full_lock(tp, 1);
17992 tg3_disable_ints(tp);
17993 tg3_full_unlock(tp);
17995 netif_device_detach(dev);
17997 tg3_full_lock(tp, 0);
17998 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17999 tg3_flag_clear(tp, INIT_COMPLETE);
18000 tg3_full_unlock(tp);
18002 err = tg3_power_down_prepare(tp);
18003 if (err) {
18004 int err2;
18006 tg3_full_lock(tp, 0);
18008 tg3_flag_set(tp, INIT_COMPLETE);
18009 err2 = tg3_restart_hw(tp, true);
18010 if (err2)
18011 goto out;
18013 tg3_timer_start(tp);
18015 netif_device_attach(dev);
18016 tg3_netif_start(tp);
18018 out:
18019 tg3_full_unlock(tp);
18021 if (!err2)
18022 tg3_phy_start(tp);
18023 }
18025 unlock:
18026 rtnl_unlock();
18027 return err;
18028 }
18030 static int tg3_resume(struct device *device)
18031 {
18032 struct net_device *dev = dev_get_drvdata(device);
18033 struct tg3 *tp = netdev_priv(dev);
18034 int err;
18036 rtnl_lock();
18038 if (!netif_running(dev))
18039 goto unlock;
18041 netif_device_attach(dev);
18043 tg3_full_lock(tp, 0);
18045 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18047 tg3_flag_set(tp, INIT_COMPLETE);
18048 err = tg3_restart_hw(tp,
18049 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18050 if (err)
18051 goto out;
18053 tg3_timer_start(tp);
18055 tg3_netif_start(tp);
18057 out:
18058 tg3_full_unlock(tp);
18060 if (!err)
18061 tg3_phy_start(tp);
18063 unlock:
18064 rtnl_unlock();
18065 return err;
18066 }
18067 #endif /* CONFIG_PM_SLEEP */
18069 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18071 static void tg3_shutdown(struct pci_dev *pdev)
18072 {
18073 struct net_device *dev = pci_get_drvdata(pdev);
18074 struct tg3 *tp = netdev_priv(dev);
18076 rtnl_lock();
18077 netif_device_detach(dev);
18079 if (netif_running(dev))
18080 dev_close(dev);
18082 if (system_state == SYSTEM_POWER_OFF)
18083 tg3_power_down(tp);
18085 rtnl_unlock();
18086 }
18088 /**
18089 * tg3_io_error_detected - called when PCI error is detected
18090 * @pdev: Pointer to PCI device
18091 * @state: The current pci connection state
18093 * This function is called after a PCI bus error affecting
18094 * this device has been detected.
18095 */
18096 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18097 pci_channel_state_t state)
18098 {
18099 struct net_device *netdev = pci_get_drvdata(pdev);
18100 struct tg3 *tp = netdev_priv(netdev);
18101 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18103 netdev_info(netdev, "PCI I/O error detected\n");
18107 /* Could be second call or maybe we don't have netdev yet */
18108 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18109 goto done;
18111 /* We needn't recover from permanent error */
18112 if (state == pci_channel_io_frozen)
18113 tp->pcierr_recovery = true;
18115 tg3_phy_stop(tp);
18117 tg3_netif_stop(tp);
18119 tg3_timer_stop(tp);
18121 /* Want to make sure that the reset task doesn't run */
18122 tg3_reset_task_cancel(tp);
18124 netif_device_detach(netdev);
18126 /* Clean up software state, even if MMIO is blocked */
18127 tg3_full_lock(tp, 0);
18128 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18129 tg3_full_unlock(tp);
18132 if (state == pci_channel_io_perm_failure) {
18133 if (netdev) {
18134 tg3_napi_enable(tp);
18135 dev_close(netdev);
18136 }
18137 err = PCI_ERS_RESULT_DISCONNECT;
18138 } else {
18139 pci_disable_device(pdev);
18140 }
18142 done:
18143 rtnl_unlock();
18145 return err;
18146 }
18147 /**
18148 * tg3_io_slot_reset - called after the pci bus has been reset.
18149 * @pdev: Pointer to PCI device
18151 * Restart the card from scratch, as if from a cold-boot.
18152 * At this point, the card has experienced a hard reset,
18153 * followed by fixups by BIOS, and has its config space
18154 * set up identically to what it was at cold boot.
18155 */
18156 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18157 {
18158 struct net_device *netdev = pci_get_drvdata(pdev);
18159 struct tg3 *tp = netdev_priv(netdev);
18160 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18161 int err;
18163 rtnl_lock();
18165 if (pci_enable_device(pdev)) {
18166 dev_err(&pdev->dev,
18167 "Cannot re-enable PCI device after reset.\n");
18171 pci_set_master(pdev);
18172 pci_restore_state(pdev);
18173 pci_save_state(pdev);
18175 if (!netdev || !netif_running(netdev)) {
18176 rc = PCI_ERS_RESULT_RECOVERED;
18177 goto done;
18178 }
18180 err = tg3_power_up(tp);
18181 if (err)
18182 goto done;
18184 rc = PCI_ERS_RESULT_RECOVERED;
18186 done:
18187 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18188 tg3_napi_enable(tp);
18189 dev_close(netdev);
18190 }
18192 rtnl_unlock();
18194 return rc;
18195 }
18196 /**
18197 * tg3_io_resume - called when traffic can start flowing again.
18198 * @pdev: Pointer to PCI device
18200 * This callback is called when the error recovery driver tells
18201 * us that it's OK to resume normal operation.
18202 */
18203 static void tg3_io_resume(struct pci_dev *pdev)
18204 {
18205 struct net_device *netdev = pci_get_drvdata(pdev);
18206 struct tg3 *tp = netdev_priv(netdev);
18207 int err;
18209 rtnl_lock();
18211 if (!netdev || !netif_running(netdev))
18212 goto done;
18214 tg3_full_lock(tp, 0);
18215 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18216 tg3_flag_set(tp, INIT_COMPLETE);
18217 err = tg3_restart_hw(tp, true);
18218 if (err) {
18219 tg3_full_unlock(tp);
18220 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18221 goto done;
18222 }
18224 netif_device_attach(netdev);
18226 tg3_timer_start(tp);
18228 tg3_netif_start(tp);
18230 tg3_full_unlock(tp);
18232 tg3_phy_start(tp);
18234 done:
18235 tp->pcierr_recovery = false;
18236 rtnl_unlock();
18237 }
18239 static const struct pci_error_handlers tg3_err_handler = {
18240 .error_detected = tg3_io_error_detected,
18241 .slot_reset = tg3_io_slot_reset,
18242 .resume = tg3_io_resume
18243 };
18245 static struct pci_driver tg3_driver = {
18246 .name = DRV_MODULE_NAME,
18247 .id_table = tg3_pci_tbl,
18248 .probe = tg3_init_one,
18249 .remove = tg3_remove_one,
18250 .err_handler = &tg3_err_handler,
18251 .driver.pm = &tg3_pm_ops,
18252 .shutdown = tg3_shutdown,
18253 };
18255 module_pci_driver(tg3_driver);