/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
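
/* Example (illustrative): tg3_flag(tp, JUMBO_CAPABLE) expands to
 * _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), i.e. each feature
 * flag is simply an atomic bit in the tg3_flags bitmap.
 */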
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 11, 2014"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
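
/* Example (illustrative): with TG3_TX_RING_SIZE == 512, NEXT_TX(511) is
 * (512 & 511) == 0, so the index wraps to the start of the ring using a
 * mask instead of a hardware modulo, as the comment above describes.
 */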
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
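
/* Low-level register accessors.  tg3_read32()/tg3_write32() go straight
 * through the memory-mapped BAR; the indirect variants further below
 * tunnel the same accesses through PCI config space for configurations
 * where direct MMIO is not reliable.
 */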
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
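
/* Shorthand accessors.  tw32()/tr32() and the mailbox variants dispatch
 * through per-chip function pointers, so the same code paths work with
 * direct MMIO, indirect config-space accesses, or the 5906 GRC mailbox
 * window.  The _f variants flush the posted write.
 */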
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
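
/* APE (Application Processing Engine) support.  On APE-enabled chips the
 * management firmware shares the hardware with the driver; the per-resource
 * request/grant registers used below arbitrate ownership between the two.
 */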
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* else: fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if hb interval has exceeded */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
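
/* Returns nonzero when the status block indicates outstanding work:
 * a link change event, TX completions to reap, or RX descriptors ready
 * to be processed.
 */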
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
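
/* MII (MDIO) access through the MAC's MI_COM register.  Hardware
 * auto-polling is paused for the duration of a manual access so the two
 * users do not race on MI_COM, and the APE PHY lock is taken where the
 * APE firmware may also touch the PHY.
 */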
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
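
/* Clause 45 access via the Clause 22 MMD indirection registers: the devad
 * goes into MMD_CTRL, the register address into MMD_ADDRESS, then MMD_CTRL
 * is switched to no-increment data mode and the data word is moved through
 * MMD_ADDRESS.
 */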
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
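
/* Driver-to-firmware event signalling.  tg3_generate_fw_event() raises
 * GRC_RX_CPU_DRIVER_EVENT after a command has been placed in the
 * NIC_SRAM_FW_CMD_* mailboxes; the bootcode clears the bit once it has
 * consumed the command, which tg3_wait_for_event_ack() polls for.
 */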
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500
/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1675 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1680 if (!tg3_readphy(tp, MII_BMCR, ®))
1682 if (!tg3_readphy(tp, MII_BMSR, ®))
1683 val |= (reg & 0xffff);
1687 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1689 if (!tg3_readphy(tp, MII_LPA, ®))
1690 val |= (reg & 0xffff);
1694 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1695 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1697 if (!tg3_readphy(tp, MII_STAT1000, ®))
1698 val |= (reg & 0xffff);
1702 if (!tg3_readphy(tp, MII_PHYADDR, ®))
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
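
/* Example (illustrative): symmetric pause on both ends resolves to
 * FLOW_CTRL_TX | FLOW_CTRL_RX.  With only the asymmetric bits matched,
 * the side that also advertised 1000XPAUSE determines the direction,
 * as in the lcladv/rmtadv checks above.
 */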
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, phydev_name(phydev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phy_set_max_speed(phydev, SPEED_1000);
			phy_support_asym_pause(phydev);
			break;
		}
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phy_set_max_speed(phydev, SPEED_100);
		phy_support_asym_pause(phydev);
		break;
	default:
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phy_attached_info(phydev);

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);


	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2265 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2269 if (!tg3_flag(tp, 5705_PLUS) ||
2270 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2273 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2276 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2277 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2279 tg3_writephy(tp, MII_TG3_FET_TEST,
2280 ephy | MII_TG3_FET_SHADOW_EN);
2281 if (!tg3_readphy(tp, reg, &phy)) {
2283 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2285 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2286 tg3_writephy(tp, reg, phy);
2288 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2293 ret = tg3_phy_auxctl_read(tp,
2294 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2297 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2299 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2300 tg3_phy_auxctl_write(tp,
2301 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2306 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2311 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2314 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2316 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2317 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2320 static void tg3_phy_apply_otp(struct tg3 *tp)
2329 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2332 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2333 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2334 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2336 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2337 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2338 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2340 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2341 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2342 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2344 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2345 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2347 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2348 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2350 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2351 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2352 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2354 tg3_phy_toggle_auxctl_smdsp(tp, false);
2357 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2360 struct ethtool_eee *dest = &tp->eee;
2362 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2371 /* Pull eee_active */
2372 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2373 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2374 dest->eee_active = 1;
2376 dest->eee_active = 0;
2378 /* Pull lp advertised settings */
2379 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2381 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2383 /* Pull advertised and eee_enabled settings */
2384 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2386 dest->eee_enabled = !!val;
2387 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2389 /* Pull tx_lpi_enabled */
2390 val = tr32(TG3_CPMU_EEE_MODE);
2391 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2393 /* Pull lpi timer value */
2394 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
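/* Note (inferred from the callers visible in this file):
 * tg3_phy_eee_adjust() passes a NULL eee pointer so the pulled state
 * lands in tp->eee, while tg3_phy_eee_config_ok() passes a local
 * struct ethtool_eee so tp->eee stays untouched for comparison, e.g.:
 *
 *	struct ethtool_eee eee;
 *	tg3_eee_pull_config(tp, &eee);	pull into a scratch copy
 */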
2397 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2401 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2406 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2408 tp->link_config.active_duplex == DUPLEX_FULL &&
2409 (tp->link_config.active_speed == SPEED_100 ||
2410 tp->link_config.active_speed == SPEED_1000)) {
2413 if (tp->link_config.active_speed == SPEED_1000)
2414 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2416 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2418 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2420 tg3_eee_pull_config(tp, NULL);
2421 if (tp->eee.eee_active)
2425 if (!tp->setlpicnt) {
2426 if (current_link_up &&
2427 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2428 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2429 tg3_phy_toggle_auxctl_smdsp(tp, false);
2432 val = tr32(TG3_CPMU_EEE_MODE);
2433 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2437 static void tg3_phy_eee_enable(struct tg3 *tp)
2441 if (tp->link_config.active_speed == SPEED_1000 &&
2442 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2443 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2444 tg3_flag(tp, 57765_CLASS)) &&
2445 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2446 val = MII_TG3_DSP_TAP26_ALNOKO |
2447 MII_TG3_DSP_TAP26_RMRXSTO;
2448 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2449 tg3_phy_toggle_auxctl_smdsp(tp, false);
2452 val = tr32(TG3_CPMU_EEE_MODE);
2453 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2456 static int tg3_wait_macro_done(struct tg3 *tp)
2463 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2464 if ((tmp32 & 0x1000) == 0)
2474 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2476 static const u32 test_pat[4][6] = {
2477 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2478 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2479 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2480 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2484 for (chan = 0; chan < 4; chan++) {
2487 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2488 (chan * 0x2000) | 0x0200);
2489 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2491 for (i = 0; i < 6; i++)
2492 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2495 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2496 if (tg3_wait_macro_done(tp)) {
2501 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2502 (chan * 0x2000) | 0x0200);
2503 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2504 if (tg3_wait_macro_done(tp)) {
2509 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2510 if (tg3_wait_macro_done(tp)) {
2515 for (i = 0; i < 6; i += 2) {
2518 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2519 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2520 tg3_wait_macro_done(tp)) {
2526 if (low != test_pat[chan][i] ||
2527 high != test_pat[chan][i+1]) {
2528 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2529 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2530 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2540 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2544 for (chan = 0; chan < 4; chan++) {
2547 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2548 (chan * 0x2000) | 0x0200);
2549 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2550 for (i = 0; i < 6; i++)
2551 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2552 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2553 if (tg3_wait_macro_done(tp))
2560 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2562 u32 reg32, phy9_orig;
2563 int retries, do_phy_reset, err;
2569 err = tg3_bmcr_reset(tp);
2575 /* Disable transmitter and interrupt. */
2576 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2580 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2582 /* Set full-duplex, 1000 mbps. */
2583 tg3_writephy(tp, MII_BMCR,
2584 BMCR_FULLDPLX | BMCR_SPEED1000);
2586 /* Set to master mode. */
2587 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2590 tg3_writephy(tp, MII_CTRL1000,
2591 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2593 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2597 /* Block the PHY control access. */
2598 tg3_phydsp_write(tp, 0x8005, 0x0800);
2600 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2603 } while (--retries);
2605 err = tg3_phy_reset_chanpat(tp);
2609 tg3_phydsp_write(tp, 0x8005, 0x0000);
2611 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2612 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2614 tg3_phy_toggle_auxctl_smdsp(tp, false);
2616 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2618 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2623 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2628 static void tg3_carrier_off(struct tg3 *tp)
2630 netif_carrier_off(tp->dev);
2631 tp->link_up = false;
2634 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2636 if (tg3_flag(tp, ENABLE_ASF))
2637 netdev_warn(tp->dev,
2638 "Management side-band traffic will be interrupted during phy settings change\n");
2641 /* This will reset the tigon3 PHY if there is no valid
2642 * link unless the FORCE argument is non-zero.
2644 static int tg3_phy_reset(struct tg3 *tp)
2649 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2650 val = tr32(GRC_MISC_CFG);
2651 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2654 err = tg3_readphy(tp, MII_BMSR, &val);
2655 err |= tg3_readphy(tp, MII_BMSR, &val);
2659 if (netif_running(tp->dev) && tp->link_up) {
2660 netif_carrier_off(tp->dev);
2661 tg3_link_report(tp);
2664 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2665 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2666 tg3_asic_rev(tp) == ASIC_REV_5705) {
2667 err = tg3_phy_reset_5703_4_5(tp);
2674 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2675 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2676 cpmuctrl = tr32(TG3_CPMU_CTRL);
2677 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2679 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2682 err = tg3_bmcr_reset(tp);
2686 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2687 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2688 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2690 tw32(TG3_CPMU_CTRL, cpmuctrl);
2693 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2694 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2695 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2696 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2697 CPMU_LSPD_1000MB_MACCLK_12_5) {
2698 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2700 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2704 if (tg3_flag(tp, 5717_PLUS) &&
2705 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2708 tg3_phy_apply_otp(tp);
2710 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2711 tg3_phy_toggle_apd(tp, true);
2713 tg3_phy_toggle_apd(tp, false);
2716 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2717 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2718 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2719 tg3_phydsp_write(tp, 0x000a, 0x0323);
2720 tg3_phy_toggle_auxctl_smdsp(tp, false);
2723 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2724 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2725 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2728 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2729 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2730 tg3_phydsp_write(tp, 0x000a, 0x310b);
2731 tg3_phydsp_write(tp, 0x201f, 0x9506);
2732 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2733 tg3_phy_toggle_auxctl_smdsp(tp, false);
2735 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2736 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2737 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2738 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2739 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2740 tg3_writephy(tp, MII_TG3_TEST1,
2741 MII_TG3_TEST1_TRIM_EN | 0x4);
2743 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2745 tg3_phy_toggle_auxctl_smdsp(tp, false);
2749 /* Set Extended packet length bit (bit 14) on all chips that
2750 * support jumbo frames */
2751 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2752 /* Cannot do read-modify-write on 5401 */
2753 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2754 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2755 /* Set bit 14 with read-modify-write to preserve other bits */
2756 err = tg3_phy_auxctl_read(tp,
2757 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2759 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2760 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2763 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2764 * jumbo frame transmission.
2766 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2767 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2768 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2769 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2772 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2773 /* adjust output voltage */
2774 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2777 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2778 tg3_phydsp_write(tp, 0xffb, 0x4000);
2780 tg3_phy_toggle_automdix(tp, true);
2781 tg3_phy_set_wirespeed(tp);
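/* Illustrative caller sketch (an assumption, not lifted from this
 * file): tg3_phy_reset() touches shared PHY and MAC state, so callers
 * are expected to hold tp->lock:
 *
 *	spin_lock_bh(&tp->lock);
 *	err = tg3_phy_reset(tp);
 *	spin_unlock_bh(&tp->lock);
 *	if (err)
 *		netdev_err(tp->dev, "PHY reset failed\n");
 */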
2785 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2786 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2787 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2788 TG3_GPIO_MSG_NEED_VAUX)
2789 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2790 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2791 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2792 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2793 (TG3_GPIO_MSG_DRVR_PRES << 12))
2795 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2796 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2797 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2798 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2799 (TG3_GPIO_MSG_NEED_VAUX << 12))
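/* Worked example of the encoding above: each PCI function owns a
 * 4-bit nibble of the status word, so relative to
 * TG3_APE_GPIO_MSG_SHIFT, function 2's driver-present bit is bit 8
 * and its VAUX request bit is bit 9:
 *
 *	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * 2;
 *	status |= TG3_GPIO_MSG_DRVR_PRES << shift;
 */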
2801 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2805 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 tg3_asic_rev(tp) == ASIC_REV_5719)
2807 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2809 status = tr32(TG3_CPMU_DRV_STATUS);
2811 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2812 status &= ~(TG3_GPIO_MSG_MASK << shift);
2813 status |= (newstat << shift);
2815 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2816 tg3_asic_rev(tp) == ASIC_REV_5719)
2817 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2819 tw32(TG3_CPMU_DRV_STATUS, status);
2821 return status >> TG3_APE_GPIO_MSG_SHIFT;
2824 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2826 if (!tg3_flag(tp, IS_NIC))
2829 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2830 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2831 tg3_asic_rev(tp) == ASIC_REV_5720) {
2832 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2835 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2837 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2838 TG3_GRC_LCLCTL_PWRSW_DELAY);
2840 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2842 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2843 TG3_GRC_LCLCTL_PWRSW_DELAY);
2849 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2853 if (!tg3_flag(tp, IS_NIC) ||
2854 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2855 tg3_asic_rev(tp) == ASIC_REV_5701)
2858 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2860 tw32_wait_f(GRC_LOCAL_CTRL,
2861 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2862 TG3_GRC_LCLCTL_PWRSW_DELAY);
2864 tw32_wait_f(GRC_LOCAL_CTRL,
2866 TG3_GRC_LCLCTL_PWRSW_DELAY);
2868 tw32_wait_f(GRC_LOCAL_CTRL,
2869 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2873 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2875 if (!tg3_flag(tp, IS_NIC))
2878 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2879 tg3_asic_rev(tp) == ASIC_REV_5701) {
2880 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2881 (GRC_LCLCTRL_GPIO_OE0 |
2882 GRC_LCLCTRL_GPIO_OE1 |
2883 GRC_LCLCTRL_GPIO_OE2 |
2884 GRC_LCLCTRL_GPIO_OUTPUT0 |
2885 GRC_LCLCTRL_GPIO_OUTPUT1),
2886 TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2888 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2889 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2890 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2891 GRC_LCLCTRL_GPIO_OE1 |
2892 GRC_LCLCTRL_GPIO_OE2 |
2893 GRC_LCLCTRL_GPIO_OUTPUT0 |
2894 GRC_LCLCTRL_GPIO_OUTPUT1 |
2896 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2897 TG3_GRC_LCLCTL_PWRSW_DELAY);
2899 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2900 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2901 TG3_GRC_LCLCTL_PWRSW_DELAY);
2903 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2904 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2905 TG3_GRC_LCLCTL_PWRSW_DELAY);
2908 u32 grc_local_ctrl = 0;
2910 /* Workaround to prevent overdrawing Amps. */
2911 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2912 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2913 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2915 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918 /* On 5753 and variants, GPIO2 cannot be used. */
2919 no_gpio2 = tp->nic_sram_data_cfg &
2920 NIC_SRAM_DATA_CFG_NO_GPIO2;
2922 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2923 GRC_LCLCTRL_GPIO_OE1 |
2924 GRC_LCLCTRL_GPIO_OE2 |
2925 GRC_LCLCTRL_GPIO_OUTPUT1 |
2926 GRC_LCLCTRL_GPIO_OUTPUT2;
2928 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2929 GRC_LCLCTRL_GPIO_OUTPUT2);
2931 tw32_wait_f(GRC_LOCAL_CTRL,
2932 tp->grc_local_ctrl | grc_local_ctrl,
2933 TG3_GRC_LCLCTL_PWRSW_DELAY);
2935 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2937 tw32_wait_f(GRC_LOCAL_CTRL,
2938 tp->grc_local_ctrl | grc_local_ctrl,
2939 TG3_GRC_LCLCTL_PWRSW_DELAY);
2942 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2943 tw32_wait_f(GRC_LOCAL_CTRL,
2944 tp->grc_local_ctrl | grc_local_ctrl,
2945 TG3_GRC_LCLCTL_PWRSW_DELAY);
2950 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2954 /* Serialize power state transitions */
2955 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2958 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2959 msg = TG3_GPIO_MSG_NEED_VAUX;
2961 msg = tg3_set_function_status(tp, msg);
2963 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2966 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2967 tg3_pwrsrc_switch_to_vaux(tp);
2969 tg3_pwrsrc_die_with_vmain(tp);
2972 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2975 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2977 bool need_vaux = false;
2979 /* The GPIOs do something completely different on 57765. */
2980 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2983 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2984 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2985 tg3_asic_rev(tp) == ASIC_REV_5720) {
2986 tg3_frob_aux_power_5717(tp, include_wol ?
2987 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2991 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2992 struct net_device *dev_peer;
2994 dev_peer = pci_get_drvdata(tp->pdev_peer);
2996 /* remove_one() may have been run on the peer. */
2998 struct tg3 *tp_peer = netdev_priv(dev_peer);
3000 if (tg3_flag(tp_peer, INIT_COMPLETE))
3003 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3004 tg3_flag(tp_peer, ENABLE_ASF))
3009 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3010 tg3_flag(tp, ENABLE_ASF))
3014 tg3_pwrsrc_switch_to_vaux(tp);
3016 tg3_pwrsrc_die_with_vmain(tp);
3019 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3021 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3023 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3024 if (speed != SPEED_10)
3026 } else if (speed == SPEED_10)
3032 static bool tg3_phy_power_bug(struct tg3 *tp)
3034 switch (tg3_asic_rev(tp)) {
3039 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3048 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3057 static bool tg3_phy_led_bug(struct tg3 *tp)
3059 switch (tg3_asic_rev(tp)) {
3062 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3071 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3075 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3078 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3079 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3080 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3081 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3084 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3085 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3086 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3091 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3093 val = tr32(GRC_MISC_CFG);
3094 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3097 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3099 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3102 tg3_writephy(tp, MII_ADVERTISE, 0);
3103 tg3_writephy(tp, MII_BMCR,
3104 BMCR_ANENABLE | BMCR_ANRESTART);
3106 tg3_writephy(tp, MII_TG3_FET_TEST,
3107 phytest | MII_TG3_FET_SHADOW_EN);
3108 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3109 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3111 MII_TG3_FET_SHDW_AUXMODE4,
3114 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3117 } else if (do_low_power) {
3118 if (!tg3_phy_led_bug(tp))
3119 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3120 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3122 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3123 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3124 MII_TG3_AUXCTL_PCTL_VREG_11V;
3125 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3128 /* The PHY should not be powered down on some chips because
3131 if (tg3_phy_power_bug(tp))
3134 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3135 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3136 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3137 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3138 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3139 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3142 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3145 /* tp->lock is held. */
3146 static int tg3_nvram_lock(struct tg3 *tp)
3148 if (tg3_flag(tp, NVRAM)) {
3151 if (tp->nvram_lock_cnt == 0) {
3152 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3153 for (i = 0; i < 8000; i++) {
3154 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3159 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3163 tp->nvram_lock_cnt++;
3168 /* tp->lock is held. */
3169 static void tg3_nvram_unlock(struct tg3 *tp)
3171 if (tg3_flag(tp, NVRAM)) {
3172 if (tp->nvram_lock_cnt > 0)
3173 tp->nvram_lock_cnt--;
3174 if (tp->nvram_lock_cnt == 0)
3175 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3179 /* tp->lock is held. */
3180 static void tg3_enable_nvram_access(struct tg3 *tp)
3182 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183 u32 nvaccess = tr32(NVRAM_ACCESS);
3185 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3189 /* tp->lock is held. */
3190 static void tg3_disable_nvram_access(struct tg3 *tp)
3192 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3193 u32 nvaccess = tr32(NVRAM_ACCESS);
3195 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3199 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3200 u32 offset, u32 *val)
3205 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3208 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3209 EEPROM_ADDR_DEVID_MASK |
3211 tw32(GRC_EEPROM_ADDR,
3213 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3214 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3215 EEPROM_ADDR_ADDR_MASK) |
3216 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3218 for (i = 0; i < 1000; i++) {
3219 tmp = tr32(GRC_EEPROM_ADDR);
3221 if (tmp & EEPROM_ADDR_COMPLETE)
3225 if (!(tmp & EEPROM_ADDR_COMPLETE))
3228 tmp = tr32(GRC_EEPROM_DATA);
3231 * The data will always be opposite the native endian
3232 * format. Perform a blind byteswap to compensate.
3239 #define NVRAM_CMD_TIMEOUT 10000
3241 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3245 tw32(NVRAM_CMD, nvram_cmd);
3246 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3247 usleep_range(10, 40);
3248 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3254 if (i == NVRAM_CMD_TIMEOUT)
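/* Illustrative sketch, mirroring tg3_nvram_read() below: a transfer is
 * started by programming NVRAM_ADDR and then driving a composite
 * command word through the helper above:
 *
 *	tw32(NVRAM_ADDR, offset);
 *	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *				 NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
 *				 NVRAM_CMD_DONE);
 */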
3260 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3262 if (tg3_flag(tp, NVRAM) &&
3263 tg3_flag(tp, NVRAM_BUFFERED) &&
3264 tg3_flag(tp, FLASH) &&
3265 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 (tp->nvram_jedecnum == JEDEC_ATMEL))
3268 addr = ((addr / tp->nvram_pagesize) <<
3269 ATMEL_AT45DB0X1B_PAGE_POS) +
3270 (addr % tp->nvram_pagesize);
3275 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3277 if (tg3_flag(tp, NVRAM) &&
3278 tg3_flag(tp, NVRAM_BUFFERED) &&
3279 tg3_flag(tp, FLASH) &&
3280 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3281 (tp->nvram_jedecnum == JEDEC_ATMEL))
3283 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3284 tp->nvram_pagesize) +
3285 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
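/* Worked example of the translation above, assuming the 264-byte page
 * size used with these Atmel parts: logical byte offset 300 is page 1,
 * byte 36 within the page, so the physical address becomes
 * (1 << ATMEL_AT45DB0X1B_PAGE_POS) + 36, i.e. 548 if PAGE_POS is 9.
 * tg3_nvram_logical_addr() performs the inverse mapping.
 */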
3290 /* NOTE: Data read in from NVRAM is byteswapped according to
3291 * the byteswapping settings for all other register accesses.
3292 * tg3 devices are BE devices, so on a BE machine, the data
3293 * returned will be exactly as it is seen in NVRAM. On a LE
3294 * machine, the 32-bit value will be byteswapped.
3296 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3300 if (!tg3_flag(tp, NVRAM))
3301 return tg3_nvram_read_using_eeprom(tp, offset, val);
3303 offset = tg3_nvram_phys_addr(tp, offset);
3305 if (offset > NVRAM_ADDR_MSK)
3308 ret = tg3_nvram_lock(tp);
3312 tg3_enable_nvram_access(tp);
3314 tw32(NVRAM_ADDR, offset);
3315 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3316 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3319 *val = tr32(NVRAM_RDDATA);
3321 tg3_disable_nvram_access(tp);
3323 tg3_nvram_unlock(tp);
3328 /* Ensures NVRAM data is in bytestream format. */
3329 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3332 int res = tg3_nvram_read(tp, offset, &v);
3334 *val = cpu_to_be32(v);
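/* Illustrative usage sketch: because the value returned above is in
 * bytestream (big-endian) order, it can be copied straight into a byte
 * buffer regardless of host endianness; "buf" here is hypothetical:
 *
 *	__be32 word;
 *	if (!tg3_nvram_read_be32(tp, offset, &word))
 *		memcpy(buf, &word, 4);
 */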
3338 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3339 u32 offset, u32 len, u8 *buf)
3344 for (i = 0; i < len; i += 4) {
3350 memcpy(&data, buf + i, 4);
3353 * The SEEPROM interface expects the data to always be opposite
3354 * the native endian format. We accomplish this by reversing
3355 * all the operations that would have been performed on the
3356 * data from a call to tg3_nvram_read_be32().
3358 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3360 val = tr32(GRC_EEPROM_ADDR);
3361 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3363 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3365 tw32(GRC_EEPROM_ADDR, val |
3366 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3367 (addr & EEPROM_ADDR_ADDR_MASK) |
3371 for (j = 0; j < 1000; j++) {
3372 val = tr32(GRC_EEPROM_ADDR);
3374 if (val & EEPROM_ADDR_COMPLETE)
3378 if (!(val & EEPROM_ADDR_COMPLETE)) {
3387 /* offset and length are dword aligned */
3388 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3392 u32 pagesize = tp->nvram_pagesize;
3393 u32 pagemask = pagesize - 1;
3397 tmp = kmalloc(pagesize, GFP_KERNEL);
3403 u32 phy_addr, page_off, size;
3405 phy_addr = offset & ~pagemask;
3407 for (j = 0; j < pagesize; j += 4) {
3408 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3409 (__be32 *) (tmp + j));
3416 page_off = offset & pagemask;
3423 memcpy(tmp + page_off, buf, size);
3425 offset = offset + (pagesize - page_off);
3427 tg3_enable_nvram_access(tp);
3430 * Before we can erase the flash page, we need
3431 * to issue a special "write enable" command.
3433 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3435 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3438 /* Erase the target page */
3439 tw32(NVRAM_ADDR, phy_addr);
3441 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3442 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3444 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3447 /* Issue another write enable to start the write. */
3448 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3450 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3453 for (j = 0; j < pagesize; j += 4) {
3456 data = *((__be32 *) (tmp + j));
3458 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3460 tw32(NVRAM_ADDR, phy_addr + j);
3462 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3466 nvram_cmd |= NVRAM_CMD_FIRST;
3467 else if (j == (pagesize - 4))
3468 nvram_cmd |= NVRAM_CMD_LAST;
3470 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3478 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3479 tg3_nvram_exec_cmd(tp, nvram_cmd);
3486 /* offset and length are dword aligned */
3487 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3492 for (i = 0; i < len; i += 4, offset += 4) {
3493 u32 page_off, phy_addr, nvram_cmd;
3496 memcpy(&data, buf + i, 4);
3497 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3499 page_off = offset % tp->nvram_pagesize;
3501 phy_addr = tg3_nvram_phys_addr(tp, offset);
3503 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3505 if (page_off == 0 || i == 0)
3506 nvram_cmd |= NVRAM_CMD_FIRST;
3507 if (page_off == (tp->nvram_pagesize - 4))
3508 nvram_cmd |= NVRAM_CMD_LAST;
3511 nvram_cmd |= NVRAM_CMD_LAST;
3513 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3514 !tg3_flag(tp, FLASH) ||
3515 !tg3_flag(tp, 57765_PLUS))
3516 tw32(NVRAM_ADDR, phy_addr);
3518 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3519 !tg3_flag(tp, 5755_PLUS) &&
3520 (tp->nvram_jedecnum == JEDEC_ST) &&
3521 (nvram_cmd & NVRAM_CMD_FIRST)) {
3524 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3525 ret = tg3_nvram_exec_cmd(tp, cmd);
3529 if (!tg3_flag(tp, FLASH)) {
3530 /* We always do complete word writes to eeprom. */
3531 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3534 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3541 /* offset and length are dword aligned */
3542 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3546 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3547 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3548 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3552 if (!tg3_flag(tp, NVRAM)) {
3553 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3557 ret = tg3_nvram_lock(tp);
3561 tg3_enable_nvram_access(tp);
3562 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3563 tw32(NVRAM_WRITE1, 0x406);
3565 grc_mode = tr32(GRC_MODE);
3566 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3568 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3569 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3572 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3576 grc_mode = tr32(GRC_MODE);
3577 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3579 tg3_disable_nvram_access(tp);
3580 tg3_nvram_unlock(tp);
3583 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3584 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
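/* Illustrative sketch (offset and buffer are made up): a caller hands
 * the helper above a dword-aligned offset and length and lets it pick
 * the buffered, unbuffered, or EEPROM path:
 *
 *	u8 blob[64];
 *	...
 *	err = tg3_nvram_write_block(tp, 0x80, sizeof(blob), blob);
 */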
3591 #define RX_CPU_SCRATCH_BASE 0x30000
3592 #define RX_CPU_SCRATCH_SIZE 0x04000
3593 #define TX_CPU_SCRATCH_BASE 0x34000
3594 #define TX_CPU_SCRATCH_SIZE 0x04000
3596 /* tp->lock is held. */
3597 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3600 const int iters = 10000;
3602 for (i = 0; i < iters; i++) {
3603 tw32(cpu_base + CPU_STATE, 0xffffffff);
3604 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3605 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3607 if (pci_channel_offline(tp->pdev))
3611 return (i == iters) ? -EBUSY : 0;
3614 /* tp->lock is held. */
3615 static int tg3_rxcpu_pause(struct tg3 *tp)
3617 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3619 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3620 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3626 /* tp->lock is held. */
3627 static int tg3_txcpu_pause(struct tg3 *tp)
3629 return tg3_pause_cpu(tp, TX_CPU_BASE);
3632 /* tp->lock is held. */
3633 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3639 /* tp->lock is held. */
3640 static void tg3_rxcpu_resume(struct tg3 *tp)
3642 tg3_resume_cpu(tp, RX_CPU_BASE);
3645 /* tp->lock is held. */
3646 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3650 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3652 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3653 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3655 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3658 if (cpu_base == RX_CPU_BASE) {
3659 rc = tg3_rxcpu_pause(tp);
3662 * There is only an Rx CPU for the 5750 derivative in the
3665 if (tg3_flag(tp, IS_SSB_CORE))
3668 rc = tg3_txcpu_pause(tp);
3672 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3673 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3677 /* Clear firmware's nvram arbitration. */
3678 if (tg3_flag(tp, NVRAM))
3679 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3683 static int tg3_fw_data_len(struct tg3 *tp,
3684 const struct tg3_firmware_hdr *fw_hdr)
3688 /* Non-fragmented firmware has one firmware header followed by a
3689 * contiguous chunk of data to be written. The length field in that
3690 * header is not the length of the data to be written but the complete
3691 * length of the bss. The data length is therefore determined from
3692 * tp->fw->size minus the headers.
3694 * Fragmented firmware has a main header followed by multiple
3695 * fragments. Each fragment is identical to non-fragmented firmware:
3696 * a firmware header followed by a contiguous chunk of data. In
3697 * the main header, the length field is unused and set to 0xffffffff.
3698 * In each fragment header the length is the entire size of that
3699 * fragment, i.e. fragment data plus header length. The data length is
3700 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3702 if (tp->fw_len == 0xffffffff)
3703 fw_len = be32_to_cpu(fw_hdr->len);
3705 fw_len = tp->fw->size;
3707 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
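/* Worked example of the math above, with a made-up fragment: a
 * fragment header whose len field reads 0x138 describes
 * (0x138 - TG3_FW_HDR_LEN) / sizeof(u32) data words following the
 * header; for non-fragmented firmware the same division is applied to
 * tp->fw->size instead.
 */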
3710 /* tp->lock is held. */
3711 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3712 u32 cpu_scratch_base, int cpu_scratch_size,
3713 const struct tg3_firmware_hdr *fw_hdr)
3716 void (*write_op)(struct tg3 *, u32, u32);
3717 int total_len = tp->fw->size;
3719 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3721 "%s: Trying to load TX cpu firmware which is 5705\n",
3726 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3727 write_op = tg3_write_mem;
3729 write_op = tg3_write_indirect_reg32;
3731 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3732 /* It is possible that bootcode is still loading at this point.
3733 * Get the nvram lock first before halting the cpu.
3735 int lock_err = tg3_nvram_lock(tp);
3736 err = tg3_halt_cpu(tp, cpu_base);
3738 tg3_nvram_unlock(tp);
3742 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3743 write_op(tp, cpu_scratch_base + i, 0);
3744 tw32(cpu_base + CPU_STATE, 0xffffffff);
3745 tw32(cpu_base + CPU_MODE,
3746 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3748 /* Subtract additional main header for fragmented firmware and
3749 * advance to the first fragment
3751 total_len -= TG3_FW_HDR_LEN;
3756 u32 *fw_data = (u32 *)(fw_hdr + 1);
3757 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3758 write_op(tp, cpu_scratch_base +
3759 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3761 be32_to_cpu(fw_data[i]));
3763 total_len -= be32_to_cpu(fw_hdr->len);
3765 /* Advance to next fragment */
3766 fw_hdr = (struct tg3_firmware_hdr *)
3767 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3768 } while (total_len > 0);
3776 /* tp->lock is held. */
3777 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3780 const int iters = 5;
3782 tw32(cpu_base + CPU_STATE, 0xffffffff);
3783 tw32_f(cpu_base + CPU_PC, pc);
3785 for (i = 0; i < iters; i++) {
3786 if (tr32(cpu_base + CPU_PC) == pc)
3788 tw32(cpu_base + CPU_STATE, 0xffffffff);
3789 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3790 tw32_f(cpu_base + CPU_PC, pc);
3794 return (i == iters) ? -EBUSY : 0;
3797 /* tp->lock is held. */
3798 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3800 const struct tg3_firmware_hdr *fw_hdr;
3803 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3805 /* Firmware blob starts with version numbers, followed by
3806 * start address and length. We are setting complete length.
3807 * length = end_address_of_bss - start_address_of_text.
3808 * Remainder is the blob to be loaded contiguously
3809 * from start address. */
3811 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3812 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3817 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3818 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3823 /* Now startup only the RX cpu. */
3824 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3825 be32_to_cpu(fw_hdr->base_addr));
3827 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3828 "should be %08x\n", __func__,
3829 tr32(RX_CPU_BASE + CPU_PC),
3830 be32_to_cpu(fw_hdr->base_addr));
3834 tg3_rxcpu_resume(tp);
3839 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3841 const int iters = 1000;
3845 /* Wait for boot code to complete initialization and enter service
3846 * loop. It is then safe to download service patches.
3848 for (i = 0; i < iters; i++) {
3849 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3856 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3860 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3862 netdev_warn(tp->dev,
3863 "Other patches exist. Not downloading EEE patch\n");
3870 /* tp->lock is held. */
3871 static void tg3_load_57766_firmware(struct tg3 *tp)
3873 struct tg3_firmware_hdr *fw_hdr;
3875 if (!tg3_flag(tp, NO_NVRAM))
3878 if (tg3_validate_rxcpu_state(tp))
3884 /* This firmware blob has a different format from older firmware
3885 * releases, as described below. The main difference is that the
3886 * data to be written is fragmented across non-contiguous locations.
3888 * In the beginning there is a firmware header identical to other
3889 * firmware, consisting of version, base addr and length. The length
3890 * here is unused and set to 0xffffffff.
3892 * This is followed by a series of firmware fragments, each of which
3893 * is individually identical to the older firmware format: a firmware
3894 * header followed by the data for that fragment. The version field
3895 * of the individual fragment headers is unused.
3898 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3899 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3902 if (tg3_rxcpu_pause(tp))
3905 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3906 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3908 tg3_rxcpu_resume(tp);
3911 /* tp->lock is held. */
3912 static int tg3_load_tso_firmware(struct tg3 *tp)
3914 const struct tg3_firmware_hdr *fw_hdr;
3915 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3918 if (!tg3_flag(tp, FW_TSO))
3921 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3923 /* Firmware blob starts with version numbers, followed by
3924 * start address and length. We are setting complete length.
3925 * length = end_address_of_bss - start_address_of_text.
3926 * Remainder is the blob to be loaded contiguously
3927 * from start address. */
3929 cpu_scratch_size = tp->fw_len;
3931 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3932 cpu_base = RX_CPU_BASE;
3933 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3935 cpu_base = TX_CPU_BASE;
3936 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3937 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3940 err = tg3_load_firmware_cpu(tp, cpu_base,
3941 cpu_scratch_base, cpu_scratch_size,
3946 /* Now startup the cpu. */
3947 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3948 be32_to_cpu(fw_hdr->base_addr));
3951 "%s fails to set CPU PC, is %08x should be %08x\n",
3952 __func__, tr32(cpu_base + CPU_PC),
3953 be32_to_cpu(fw_hdr->base_addr));
3957 tg3_resume_cpu(tp, cpu_base);
3961 /* tp->lock is held. */
3962 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3964 u32 addr_high, addr_low;
3966 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3967 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3968 (mac_addr[4] << 8) | mac_addr[5]);
3971 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3972 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3975 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3976 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
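/* Worked example of the register packing above, using the made-up
 * address 00:10:18:aa:bb:cc:
 *
 *	addr_high = (0x00 << 8) | 0x10 = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 *		  = 0x18aabbcc
 */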
3980 /* tp->lock is held. */
3981 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3986 for (i = 0; i < 4; i++) {
3987 if (i == 1 && skip_mac_1)
3989 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3992 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3993 tg3_asic_rev(tp) == ASIC_REV_5704) {
3994 for (i = 4; i < 16; i++)
3995 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3998 addr_high = (tp->dev->dev_addr[0] +
3999 tp->dev->dev_addr[1] +
4000 tp->dev->dev_addr[2] +
4001 tp->dev->dev_addr[3] +
4002 tp->dev->dev_addr[4] +
4003 tp->dev->dev_addr[5]) &
4004 TX_BACKOFF_SEED_MASK;
4005 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4008 static void tg3_enable_register_access(struct tg3 *tp)
4011 * Make sure register accesses (indirect or otherwise) will function
4014 pci_write_config_dword(tp->pdev,
4015 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4018 static int tg3_power_up(struct tg3 *tp)
4022 tg3_enable_register_access(tp);
4024 err = pci_set_power_state(tp->pdev, PCI_D0);
4026 /* Switch out of Vaux if it is a NIC */
4027 tg3_pwrsrc_switch_to_vmain(tp);
4029 netdev_err(tp->dev, "Transition to D0 failed\n");
4035 static int tg3_setup_phy(struct tg3 *, bool);
4037 static int tg3_power_down_prepare(struct tg3 *tp)
4040 bool device_should_wake, do_low_power;
4042 tg3_enable_register_access(tp);
4044 /* Restore the CLKREQ setting. */
4045 if (tg3_flag(tp, CLKREQ_BUG))
4046 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4047 PCI_EXP_LNKCTL_CLKREQ_EN);
4049 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4050 tw32(TG3PCI_MISC_HOST_CTRL,
4051 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4053 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4054 tg3_flag(tp, WOL_ENABLE);
4056 if (tg3_flag(tp, USE_PHYLIB)) {
4057 do_low_power = false;
4058 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4059 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4060 struct phy_device *phydev;
4061 u32 phyid, advertising;
4063 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4065 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4067 tp->link_config.speed = phydev->speed;
4068 tp->link_config.duplex = phydev->duplex;
4069 tp->link_config.autoneg = phydev->autoneg;
4070 tp->link_config.advertising = phydev->advertising;
4072 advertising = ADVERTISED_TP |
4074 ADVERTISED_Autoneg |
4075 ADVERTISED_10baseT_Half;
4077 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4078 if (tg3_flag(tp, WOL_SPEED_100MB))
4080 ADVERTISED_100baseT_Half |
4081 ADVERTISED_100baseT_Full |
4082 ADVERTISED_10baseT_Full;
4084 advertising |= ADVERTISED_10baseT_Full;
4087 phydev->advertising = advertising;
4089 phy_start_aneg(phydev);
4091 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4092 if (phyid != PHY_ID_BCMAC131) {
4093 phyid &= PHY_BCM_OUI_MASK;
4094 if (phyid == PHY_BCM_OUI_1 ||
4095 phyid == PHY_BCM_OUI_2 ||
4096 phyid == PHY_BCM_OUI_3)
4097 do_low_power = true;
4101 do_low_power = true;
4103 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4104 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4106 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4107 tg3_setup_phy(tp, false);
4110 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4113 val = tr32(GRC_VCPU_EXT_CTRL);
4114 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4115 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4119 for (i = 0; i < 200; i++) {
4120 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4121 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4126 if (tg3_flag(tp, WOL_CAP))
4127 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4128 WOL_DRV_STATE_SHUTDOWN |
4132 if (device_should_wake) {
4135 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4137 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4138 tg3_phy_auxctl_write(tp,
4139 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4140 MII_TG3_AUXCTL_PCTL_WOL_EN |
4141 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4142 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4146 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4147 mac_mode = MAC_MODE_PORT_MODE_GMII;
4148 else if (tp->phy_flags &
4149 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4150 if (tp->link_config.active_speed == SPEED_1000)
4151 mac_mode = MAC_MODE_PORT_MODE_GMII;
4153 mac_mode = MAC_MODE_PORT_MODE_MII;
4155 mac_mode = MAC_MODE_PORT_MODE_MII;
4157 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4158 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4159 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4160 SPEED_100 : SPEED_10;
4161 if (tg3_5700_link_polarity(tp, speed))
4162 mac_mode |= MAC_MODE_LINK_POLARITY;
4164 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4167 mac_mode = MAC_MODE_PORT_MODE_TBI;
4170 if (!tg3_flag(tp, 5750_PLUS))
4171 tw32(MAC_LED_CTRL, tp->led_ctrl);
4173 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4174 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4175 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4176 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4178 if (tg3_flag(tp, ENABLE_APE))
4179 mac_mode |= MAC_MODE_APE_TX_EN |
4180 MAC_MODE_APE_RX_EN |
4181 MAC_MODE_TDE_ENABLE;
4183 tw32_f(MAC_MODE, mac_mode);
4186 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4190 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4191 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4192 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4195 base_val = tp->pci_clock_ctrl;
4196 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4197 CLOCK_CTRL_TXCLK_DISABLE);
4199 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4200 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4201 } else if (tg3_flag(tp, 5780_CLASS) ||
4202 tg3_flag(tp, CPMU_PRESENT) ||
4203 tg3_asic_rev(tp) == ASIC_REV_5906) {
4205 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4206 u32 newbits1, newbits2;
4208 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4209 tg3_asic_rev(tp) == ASIC_REV_5701) {
4210 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4211 CLOCK_CTRL_TXCLK_DISABLE |
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 } else if (tg3_flag(tp, 5705_PLUS)) {
4215 newbits1 = CLOCK_CTRL_625_CORE;
4216 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4218 newbits1 = CLOCK_CTRL_ALTCLK;
4219 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4222 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4225 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4228 if (!tg3_flag(tp, 5705_PLUS)) {
4231 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4232 tg3_asic_rev(tp) == ASIC_REV_5701) {
4233 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4234 CLOCK_CTRL_TXCLK_DISABLE |
4235 CLOCK_CTRL_44MHZ_CORE);
4237 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4240 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4241 tp->pci_clock_ctrl | newbits3, 40);
4245 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4246 tg3_power_down_phy(tp, do_low_power);
4248 tg3_frob_aux_power(tp, true);
4250 /* Workaround for unstable PLL clock */
4251 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4252 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4253 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4254 u32 val = tr32(0x7d00);
4256 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4258 if (!tg3_flag(tp, ENABLE_ASF)) {
4261 err = tg3_nvram_lock(tp);
4262 tg3_halt_cpu(tp, RX_CPU_BASE);
4264 tg3_nvram_unlock(tp);
4268 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4270 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4275 static void tg3_power_down(struct tg3 *tp)
4277 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4278 pci_set_power_state(tp->pdev, PCI_D3hot);
4281 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4283 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4284 case MII_TG3_AUX_STAT_10HALF:
4286 *duplex = DUPLEX_HALF;
4289 case MII_TG3_AUX_STAT_10FULL:
4291 *duplex = DUPLEX_FULL;
4294 case MII_TG3_AUX_STAT_100HALF:
4296 *duplex = DUPLEX_HALF;
4299 case MII_TG3_AUX_STAT_100FULL:
4301 *duplex = DUPLEX_FULL;
4304 case MII_TG3_AUX_STAT_1000HALF:
4305 *speed = SPEED_1000;
4306 *duplex = DUPLEX_HALF;
4309 case MII_TG3_AUX_STAT_1000FULL:
4310 *speed = SPEED_1000;
4311 *duplex = DUPLEX_FULL;
4315 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4316 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4318 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4322 *speed = SPEED_UNKNOWN;
4323 *duplex = DUPLEX_UNKNOWN;
4328 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4333 new_adv = ADVERTISE_CSMA;
4334 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4335 new_adv |= mii_advertise_flowctrl(flowctrl);
4337 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4341 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4342 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4344 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4345 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4346 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4348 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4353 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4356 tw32(TG3_CPMU_EEE_MODE,
4357 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4359 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4364 /* Advertise 100-BaseTX EEE ability */
4365 if (advertise & ADVERTISED_100baseT_Full)
4366 val |= MDIO_AN_EEE_ADV_100TX;
4367 /* Advertise 1000-BaseT EEE ability */
4368 if (advertise & ADVERTISED_1000baseT_Full)
4369 val |= MDIO_AN_EEE_ADV_1000T;
4371 if (!tp->eee.eee_enabled) {
4373 tp->eee.advertised = 0;
4375 tp->eee.advertised = advertise &
4376 (ADVERTISED_100baseT_Full |
4377 ADVERTISED_1000baseT_Full);
4380 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4384 switch (tg3_asic_rev(tp)) {
4386 case ASIC_REV_57765:
4387 case ASIC_REV_57766:
4389 /* If we advertised any EEE abilities above... */
4391 val = MII_TG3_DSP_TAP26_ALNOKO |
4392 MII_TG3_DSP_TAP26_RMRXSTO |
4393 MII_TG3_DSP_TAP26_OPCSINPT;
4394 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4398 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4399 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4400 MII_TG3_DSP_CH34TP2_HIBW01);
4403 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4412 static void tg3_phy_copper_begin(struct tg3 *tp)
4414 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4415 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4418 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4419 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4420 adv = ADVERTISED_10baseT_Half |
4421 ADVERTISED_10baseT_Full;
4422 if (tg3_flag(tp, WOL_SPEED_100MB))
4423 adv |= ADVERTISED_100baseT_Half |
4424 ADVERTISED_100baseT_Full;
4425 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4426 if (!(tp->phy_flags &
4427 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4428 adv |= ADVERTISED_1000baseT_Half;
4429 adv |= ADVERTISED_1000baseT_Full;
4432 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4434 adv = tp->link_config.advertising;
4435 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4436 adv &= ~(ADVERTISED_1000baseT_Half |
4437 ADVERTISED_1000baseT_Full);
4439 fc = tp->link_config.flowctrl;
4442 tg3_phy_autoneg_cfg(tp, adv, fc);
4444 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4445 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4446 /* Normally during power down we want to autonegotiate
4447 * the lowest possible speed for WOL. However, to avoid
4448 * link flap, we leave it untouched.
4453 tg3_writephy(tp, MII_BMCR,
4454 BMCR_ANENABLE | BMCR_ANRESTART);
4457 u32 bmcr, orig_bmcr;
4459 tp->link_config.active_speed = tp->link_config.speed;
4460 tp->link_config.active_duplex = tp->link_config.duplex;
4462 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4463 /* With autoneg disabled, 5715 only links up when the
4464 * advertisement register has the configured speed
4467 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4471 switch (tp->link_config.speed) {
4477 bmcr |= BMCR_SPEED100;
4481 bmcr |= BMCR_SPEED1000;
4485 if (tp->link_config.duplex == DUPLEX_FULL)
4486 bmcr |= BMCR_FULLDPLX;
4488 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4489 (bmcr != orig_bmcr)) {
4490 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4491 for (i = 0; i < 1500; i++) {
4495 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4496 tg3_readphy(tp, MII_BMSR, &tmp))
4498 if (!(tmp & BMSR_LSTATUS)) {
4503 tg3_writephy(tp, MII_BMCR, bmcr);
4509 static int tg3_phy_pull_config(struct tg3 *tp)
4514 err = tg3_readphy(tp, MII_BMCR, &val);
4518 if (!(val & BMCR_ANENABLE)) {
4519 tp->link_config.autoneg = AUTONEG_DISABLE;
4520 tp->link_config.advertising = 0;
4521 tg3_flag_clear(tp, PAUSE_AUTONEG);
4525 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4527 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4530 tp->link_config.speed = SPEED_10;
4533 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4536 tp->link_config.speed = SPEED_100;
4538 case BMCR_SPEED1000:
4539 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4540 tp->link_config.speed = SPEED_1000;
4548 if (val & BMCR_FULLDPLX)
4549 tp->link_config.duplex = DUPLEX_FULL;
4551 tp->link_config.duplex = DUPLEX_HALF;
4553 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4559 tp->link_config.autoneg = AUTONEG_ENABLE;
4560 tp->link_config.advertising = ADVERTISED_Autoneg;
4561 tg3_flag_set(tp, PAUSE_AUTONEG);
4563 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4566 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4570 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4571 tp->link_config.advertising |= adv | ADVERTISED_TP;
4573 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4575 tp->link_config.advertising |= ADVERTISED_FIBRE;
4578 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4581 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4582 err = tg3_readphy(tp, MII_CTRL1000, &val);
4586 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4588 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4592 adv = tg3_decode_flowctrl_1000X(val);
4593 tp->link_config.flowctrl = adv;
4595 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4596 adv = mii_adv_to_ethtool_adv_x(val);
4599 tp->link_config.advertising |= adv;
4606 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4610 /* Turn off tap power management. */
4611 /* Set Extended packet length bit */
4612 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4614 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4615 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4616 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4617 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4618 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4625 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4627 struct ethtool_eee eee;
4629 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4632 tg3_eee_pull_config(tp, &eee);
4634 if (tp->eee.eee_enabled) {
4635 if (tp->eee.advertised != eee.advertised ||
4636 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4637 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4640 /* EEE is disabled but we're advertising */
4648 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4650 u32 advmsk, tgtadv, advertising;
4652 advertising = tp->link_config.advertising;
4653 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4655 advmsk = ADVERTISE_ALL;
4656 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4657 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4658 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4661 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4664 if ((*lcladv & advmsk) != tgtadv)
4667 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4670 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4672 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4676 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4677 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4678 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4679 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4680 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4682 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4685 if (tg3_ctrl != tgtadv)
4692 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4696 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4699 if (tg3_readphy(tp, MII_STAT1000, &val))
4702 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4705 if (tg3_readphy(tp, MII_LPA, rmtadv))
4708 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4709 tp->link_config.rmt_adv = lpeth;
4714 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4716 if (curr_link_up != tp->link_up) {
4718 netif_carrier_on(tp->dev);
4720 netif_carrier_off(tp->dev);
4721 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4722 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4725 tg3_link_report(tp);
4732 static void tg3_clear_mac_status(struct tg3 *tp)
4737 MAC_STATUS_SYNC_CHANGED |
4738 MAC_STATUS_CFG_CHANGED |
4739 MAC_STATUS_MI_COMPLETION |
4740 MAC_STATUS_LNKSTATE_CHANGED);
4744 static void tg3_setup_eee(struct tg3 *tp)
4748 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4749 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4750 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4751 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4753 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4755 tw32_f(TG3_CPMU_EEE_CTRL,
4756 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4758 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4759 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4760 TG3_CPMU_EEEMD_LPI_IN_RX |
4761 TG3_CPMU_EEEMD_EEE_ENABLE;
4763 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4764 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4766 if (tg3_flag(tp, ENABLE_APE))
4767 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4769 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4771 tw32_f(TG3_CPMU_EEE_DBTMR1,
4772 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4773 (tp->eee.tx_lpi_timer & 0xffff));
4775 tw32_f(TG3_CPMU_EEE_DBTMR2,
4776 TG3_CPMU_DBTMR2_APE_TX_2047US |
4777 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
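/* Illustrative sketch (field values are assumptions): a caller that
 * wants LPI asserted on transmit sets up tp->eee before programming
 * the CPMU with the helper above:
 *
 *	tp->eee.eee_enabled = 1;
 *	tp->eee.tx_lpi_enabled = 1;
 *	tg3_setup_eee(tp);
 */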
4780 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4782 bool current_link_up;
4784 u32 lcl_adv, rmt_adv;
4789 tg3_clear_mac_status(tp);
4791 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4793 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4797 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4799 /* Some third-party PHYs need to be reset on link going
4802 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4803 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4804 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4806 tg3_readphy(tp, MII_BMSR, &bmsr);
4807 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4808 !(bmsr & BMSR_LSTATUS))
4814 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4815 tg3_readphy(tp, MII_BMSR, &bmsr);
4816 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4817 !tg3_flag(tp, INIT_COMPLETE))
4820 if (!(bmsr & BMSR_LSTATUS)) {
4821 err = tg3_init_5401phy_dsp(tp);
4825 tg3_readphy(tp, MII_BMSR, &bmsr);
4826 for (i = 0; i < 1000; i++) {
4828 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4829 (bmsr & BMSR_LSTATUS)) {
4835 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4836 TG3_PHY_REV_BCM5401_B0 &&
4837 !(bmsr & BMSR_LSTATUS) &&
4838 tp->link_config.active_speed == SPEED_1000) {
4839 err = tg3_phy_reset(tp);
4841 err = tg3_init_5401phy_dsp(tp);
4846 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4847 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4848 /* 5701 {A0,B0} CRC bug workaround */
4849 tg3_writephy(tp, 0x15, 0x0a75);
4850 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4851 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4852 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4855 /* Clear pending interrupts... */
4856 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4857 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4859 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4860 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4861 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4862 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4864 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4865 tg3_asic_rev(tp) == ASIC_REV_5701) {
4866 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4867 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4868 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4870 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4873 current_link_up = false;
4874 current_speed = SPEED_UNKNOWN;
4875 current_duplex = DUPLEX_UNKNOWN;
4876 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4877 tp->link_config.rmt_adv = 0;
4879 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4880 err = tg3_phy_auxctl_read(tp,
4881 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4883 if (!err && !(val & (1 << 10))) {
4884 tg3_phy_auxctl_write(tp,
4885 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4892 for (i = 0; i < 100; i++) {
4893 tg3_readphy(tp, MII_BMSR, &bmsr);
4894 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4895 (bmsr & BMSR_LSTATUS))
4900 if (bmsr & BMSR_LSTATUS) {
4903 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4904 for (i = 0; i < 2000; i++) {
4906 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4911 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4916 for (i = 0; i < 200; i++) {
4917 tg3_readphy(tp, MII_BMCR, &bmcr);
4918 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4920 if (bmcr && bmcr != 0x7fff)
4928 tp->link_config.active_speed = current_speed;
4929 tp->link_config.active_duplex = current_duplex;
4931 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4932 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4934 if ((bmcr & BMCR_ANENABLE) &&
4936 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4937 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4938 current_link_up = true;
4940 /* EEE settings changes take effect only after a phy
4941 * reset. If we have skipped a reset due to Link Flap
4942 * Avoidance being enabled, do it now.
4944 if (!eee_config_ok &&
4945 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4951 if (!(bmcr & BMCR_ANENABLE) &&
4952 tp->link_config.speed == current_speed &&
4953 tp->link_config.duplex == current_duplex) {
4954 current_link_up = true;
4958 if (current_link_up &&
4959 tp->link_config.active_duplex == DUPLEX_FULL) {
4962 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4963 reg = MII_TG3_FET_GEN_STAT;
4964 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4966 reg = MII_TG3_EXT_STAT;
4967 bit = MII_TG3_EXT_STAT_MDIX;
4970 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4971 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4973 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4978 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4979 tg3_phy_copper_begin(tp);
4981 if (tg3_flag(tp, ROBOSWITCH)) {
4982 current_link_up = true;
4983 /* FIXME: when the BCM5325 switch is used, use 100 Mbit/s */
4984 current_speed = SPEED_1000;
4985 current_duplex = DUPLEX_FULL;
4986 tp->link_config.active_speed = current_speed;
4987 tp->link_config.active_duplex = current_duplex;
4990 tg3_readphy(tp, MII_BMSR, &bmsr);
4991 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4992 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4993 current_link_up = true;
4996 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4997 if (current_link_up) {
4998 if (tp->link_config.active_speed == SPEED_100 ||
4999 tp->link_config.active_speed == SPEED_10)
5000 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5002 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5003 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5004 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5006 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5008 /* In order for the 5750 core in BCM4785 chip to work properly
5009 * in RGMII mode, the Led Control Register must be set up.
5011 if (tg3_flag(tp, RGMII_MODE)) {
5012 u32 led_ctrl = tr32(MAC_LED_CTRL);
5013 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5015 if (tp->link_config.active_speed == SPEED_10)
5016 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5017 else if (tp->link_config.active_speed == SPEED_100)
5018 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5019 LED_CTRL_100MBPS_ON);
5020 else if (tp->link_config.active_speed == SPEED_1000)
5021 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5022 LED_CTRL_1000MBPS_ON);
5024 tw32(MAC_LED_CTRL, led_ctrl);
5028 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5029 if (tp->link_config.active_duplex == DUPLEX_HALF)
5030 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5032 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5033 if (current_link_up &&
5034 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5035 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5037 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5040 /* ??? Without this setting Netgear GA302T PHY does not
5041 * ??? send/receive packets...
5043 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5044 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5045 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5046 tw32_f(MAC_MI_MODE, tp->mi_mode);
5050 tw32_f(MAC_MODE, tp->mac_mode);
5053 tg3_phy_eee_adjust(tp, current_link_up);
5055 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5056 /* Polled via timer. */
5057 tw32_f(MAC_EVENT, 0);
5059 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5063 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5065 tp->link_config.active_speed == SPEED_1000 &&
5066 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5069 (MAC_STATUS_SYNC_CHANGED |
5070 MAC_STATUS_CFG_CHANGED));
5073 NIC_SRAM_FIRMWARE_MBOX,
5074 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5077 /* Prevent send BD corruption. */
5078 if (tg3_flag(tp, CLKREQ_BUG)) {
5079 if (tp->link_config.active_speed == SPEED_100 ||
5080 tp->link_config.active_speed == SPEED_10)
5081 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5082 PCI_EXP_LNKCTL_CLKREQ_EN);
5084 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5085 PCI_EXP_LNKCTL_CLKREQ_EN);
5088 tg3_test_and_report_link_chg(tp, current_link_up);
5093 struct tg3_fiber_aneginfo {
5095 #define ANEG_STATE_UNKNOWN 0
5096 #define ANEG_STATE_AN_ENABLE 1
5097 #define ANEG_STATE_RESTART_INIT 2
5098 #define ANEG_STATE_RESTART 3
5099 #define ANEG_STATE_DISABLE_LINK_OK 4
5100 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5101 #define ANEG_STATE_ABILITY_DETECT 6
5102 #define ANEG_STATE_ACK_DETECT_INIT 7
5103 #define ANEG_STATE_ACK_DETECT 8
5104 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5105 #define ANEG_STATE_COMPLETE_ACK 10
5106 #define ANEG_STATE_IDLE_DETECT_INIT 11
5107 #define ANEG_STATE_IDLE_DETECT 12
5108 #define ANEG_STATE_LINK_OK 13
5109 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5110 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5113 #define MR_AN_ENABLE 0x00000001
5114 #define MR_RESTART_AN 0x00000002
5115 #define MR_AN_COMPLETE 0x00000004
5116 #define MR_PAGE_RX 0x00000008
5117 #define MR_NP_LOADED 0x00000010
5118 #define MR_TOGGLE_TX 0x00000020
5119 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5120 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5121 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5122 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5123 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5124 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5125 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5126 #define MR_TOGGLE_RX 0x00002000
5127 #define MR_NP_RX 0x00004000
5129 #define MR_LINK_OK 0x80000000
5131 unsigned long link_time, cur_time;
5133 u32 ability_match_cfg;
5134 int ability_match_count;
5136 char ability_match, idle_match, ack_match;
5138 u32 txconfig, rxconfig;
5139 #define ANEG_CFG_NP 0x00000080
5140 #define ANEG_CFG_ACK 0x00000040
5141 #define ANEG_CFG_RF2 0x00000020
5142 #define ANEG_CFG_RF1 0x00000010
5143 #define ANEG_CFG_PS2 0x00000001
5144 #define ANEG_CFG_PS1 0x00008000
5145 #define ANEG_CFG_HD 0x00004000
5146 #define ANEG_CFG_FD 0x00002000
5147 #define ANEG_CFG_INVAL 0x00001f06
5152 #define ANEG_TIMER_ENAB 2
5153 #define ANEG_FAILED -1
5155 #define ANEG_STATE_SETTLE_TIME 10000
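/* Software 1000BASE-X autonegotiation state machine (the familiar IEEE
 * 802.3 clause 37 ability/ack/idle detect flow), ticked repeatedly from
 * fiber_autoneg().  Returns ANEG_DONE or ANEG_FAILED once resolved, or
 * ANEG_TIMER_ENAB when the caller should keep ticking it.
 */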
5157 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5158 struct tg3_fiber_aneginfo *ap)
5161 unsigned long delta;
5165 if (ap->state == ANEG_STATE_UNKNOWN) {
5169 ap->ability_match_cfg = 0;
5170 ap->ability_match_count = 0;
5171 ap->ability_match = 0;
5177 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5178 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5180 if (rx_cfg_reg != ap->ability_match_cfg) {
5181 ap->ability_match_cfg = rx_cfg_reg;
5182 ap->ability_match = 0;
5183 ap->ability_match_count = 0;
5185 if (++ap->ability_match_count > 1) {
5186 ap->ability_match = 1;
5187 ap->ability_match_cfg = rx_cfg_reg;
5190 if (rx_cfg_reg & ANEG_CFG_ACK)
5198 ap->ability_match_cfg = 0;
5199 ap->ability_match_count = 0;
5200 ap->ability_match = 0;
5206 ap->rxconfig = rx_cfg_reg;
5209 switch (ap->state) {
5210 case ANEG_STATE_UNKNOWN:
5211 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5212 ap->state = ANEG_STATE_AN_ENABLE;
5215 case ANEG_STATE_AN_ENABLE:
5216 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5217 if (ap->flags & MR_AN_ENABLE) {
5220 ap->ability_match_cfg = 0;
5221 ap->ability_match_count = 0;
5222 ap->ability_match = 0;
5226 ap->state = ANEG_STATE_RESTART_INIT;
5228 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5232 case ANEG_STATE_RESTART_INIT:
5233 ap->link_time = ap->cur_time;
5234 ap->flags &= ~(MR_NP_LOADED);
5236 tw32(MAC_TX_AUTO_NEG, 0);
5237 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5238 tw32_f(MAC_MODE, tp->mac_mode);
5241 ret = ANEG_TIMER_ENAB;
5242 ap->state = ANEG_STATE_RESTART;
5245 case ANEG_STATE_RESTART:
5246 delta = ap->cur_time - ap->link_time;
5247 if (delta > ANEG_STATE_SETTLE_TIME)
5248 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5250 ret = ANEG_TIMER_ENAB;
5253 case ANEG_STATE_DISABLE_LINK_OK:
5257 case ANEG_STATE_ABILITY_DETECT_INIT:
5258 ap->flags &= ~(MR_TOGGLE_TX);
5259 ap->txconfig = ANEG_CFG_FD;
5260 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5261 if (flowctrl & ADVERTISE_1000XPAUSE)
5262 ap->txconfig |= ANEG_CFG_PS1;
5263 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5264 ap->txconfig |= ANEG_CFG_PS2;
5265 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5266 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5267 tw32_f(MAC_MODE, tp->mac_mode);
5270 ap->state = ANEG_STATE_ABILITY_DETECT;
5273 case ANEG_STATE_ABILITY_DETECT:
5274 if (ap->ability_match != 0 && ap->rxconfig != 0)
5275 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5278 case ANEG_STATE_ACK_DETECT_INIT:
5279 ap->txconfig |= ANEG_CFG_ACK;
5280 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5281 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5282 tw32_f(MAC_MODE, tp->mac_mode);
5285 ap->state = ANEG_STATE_ACK_DETECT;
5288 case ANEG_STATE_ACK_DETECT:
5289 if (ap->ack_match != 0) {
5290 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5291 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5292 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5294 ap->state = ANEG_STATE_AN_ENABLE;
5296 } else if (ap->ability_match != 0 &&
5297 ap->rxconfig == 0) {
5298 ap->state = ANEG_STATE_AN_ENABLE;
5302 case ANEG_STATE_COMPLETE_ACK_INIT:
5303 if (ap->rxconfig & ANEG_CFG_INVAL) {
5307 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5308 MR_LP_ADV_HALF_DUPLEX |
5309 MR_LP_ADV_SYM_PAUSE |
5310 MR_LP_ADV_ASYM_PAUSE |
5311 MR_LP_ADV_REMOTE_FAULT1 |
5312 MR_LP_ADV_REMOTE_FAULT2 |
5313 MR_LP_ADV_NEXT_PAGE |
5316 if (ap->rxconfig & ANEG_CFG_FD)
5317 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5318 if (ap->rxconfig & ANEG_CFG_HD)
5319 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5320 if (ap->rxconfig & ANEG_CFG_PS1)
5321 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5322 if (ap->rxconfig & ANEG_CFG_PS2)
5323 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5324 if (ap->rxconfig & ANEG_CFG_RF1)
5325 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5326 if (ap->rxconfig & ANEG_CFG_RF2)
5327 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5328 if (ap->rxconfig & ANEG_CFG_NP)
5329 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5331 ap->link_time = ap->cur_time;
5333 ap->flags ^= (MR_TOGGLE_TX);
5334 if (ap->rxconfig & 0x0008)
5335 ap->flags |= MR_TOGGLE_RX;
5336 if (ap->rxconfig & ANEG_CFG_NP)
5337 ap->flags |= MR_NP_RX;
5338 ap->flags |= MR_PAGE_RX;
5340 ap->state = ANEG_STATE_COMPLETE_ACK;
5341 ret = ANEG_TIMER_ENAB;
5344 case ANEG_STATE_COMPLETE_ACK:
5345 if (ap->ability_match != 0 &&
5346 ap->rxconfig == 0) {
5347 ap->state = ANEG_STATE_AN_ENABLE;
5350 delta = ap->cur_time - ap->link_time;
5351 if (delta > ANEG_STATE_SETTLE_TIME) {
5352 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5353 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5355 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5356 !(ap->flags & MR_NP_RX)) {
5357 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5365 case ANEG_STATE_IDLE_DETECT_INIT:
5366 ap->link_time = ap->cur_time;
5367 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5368 tw32_f(MAC_MODE, tp->mac_mode);
5371 ap->state = ANEG_STATE_IDLE_DETECT;
5372 ret = ANEG_TIMER_ENAB;
5375 case ANEG_STATE_IDLE_DETECT:
5376 if (ap->ability_match != 0 &&
5377 ap->rxconfig == 0) {
5378 ap->state = ANEG_STATE_AN_ENABLE;
5381 delta = ap->cur_time - ap->link_time;
5382 if (delta > ANEG_STATE_SETTLE_TIME) {
5383 /* XXX another gem from the Broadcom driver :( */
5384 ap->state = ANEG_STATE_LINK_OK;
5388 case ANEG_STATE_LINK_OK:
5389 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5393 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5394 /* ??? unimplemented */
5397 case ANEG_STATE_NEXT_PAGE_WAIT:
5398 /* ??? unimplemented */
5409 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5412 struct tg3_fiber_aneginfo aninfo;
5413 int status = ANEG_FAILED;
5417 tw32_f(MAC_TX_AUTO_NEG, 0);
5419 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5420 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5423 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5426 memset(&aninfo, 0, sizeof(aninfo));
5427 aninfo.flags |= MR_AN_ENABLE;
5428 aninfo.state = ANEG_STATE_UNKNOWN;
5429 aninfo.cur_time = 0;
5431 while (++tick < 195000) {
5432 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5433 if (status == ANEG_DONE || status == ANEG_FAILED)
5439 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5440 tw32_f(MAC_MODE, tp->mac_mode);
5443 *txflags = aninfo.txconfig;
5444 *rxflags = aninfo.flags;
5446 if (status == ANEG_DONE &&
5447 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5448 MR_LP_ADV_FULL_DUPLEX)))
5454 static void tg3_init_bcm8002(struct tg3 *tp)
5456 u32 mac_status = tr32(MAC_STATUS);
5459 /* Reset when initializing for the first time, or when we have a link. */
5460 if (tg3_flag(tp, INIT_COMPLETE) &&
5461 !(mac_status & MAC_STATUS_PCS_SYNCED))
5464 /* Set PLL lock range. */
5465 tg3_writephy(tp, 0x16, 0x8007);
5468 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5470 /* Wait for reset to complete. */
5471 /* XXX schedule_timeout() ... */
5472 for (i = 0; i < 500; i++)
5475 /* Config mode; select PMA/Ch 1 regs. */
5476 tg3_writephy(tp, 0x10, 0x8411);
5478 /* Enable auto-lock and comdet, select txclk for tx. */
5479 tg3_writephy(tp, 0x11, 0x0a10);
5481 tg3_writephy(tp, 0x18, 0x00a0);
5482 tg3_writephy(tp, 0x16, 0x41ff);
5484 /* Assert and deassert POR. */
5485 tg3_writephy(tp, 0x13, 0x0400);
5487 tg3_writephy(tp, 0x13, 0x0000);
5489 tg3_writephy(tp, 0x11, 0x0a50);
5491 tg3_writephy(tp, 0x11, 0x0a10);
5493 /* Wait for signal to stabilize */
5494 /* XXX schedule_timeout() ... */
5495 for (i = 0; i < 15000; i++)
5498 /* Deselect the channel register so we can read the PHYID
5501 tg3_writephy(tp, 0x10, 0x8011);
5504 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5507 bool current_link_up;
5508 u32 sg_dig_ctrl, sg_dig_status;
5509 u32 serdes_cfg, expected_sg_dig_ctrl;
5510 int workaround, port_a;
5513 expected_sg_dig_ctrl = 0;
5516 current_link_up = false;
5518 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5519 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5521 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5524 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5525 /* preserve bits 20-23 for voltage regulator */
5526 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5529 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5531 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5532 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5534 u32 val = serdes_cfg;
5540 tw32_f(MAC_SERDES_CFG, val);
5543 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5545 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5546 tg3_setup_flow_control(tp, 0, 0);
5547 current_link_up = true;
5552 /* Want auto-negotiation. */
5553 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5555 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5556 if (flowctrl & ADVERTISE_1000XPAUSE)
5557 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5558 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5559 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5561 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5562 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5563 tp->serdes_counter &&
5564 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5565 MAC_STATUS_RCVD_CFG)) ==
5566 MAC_STATUS_PCS_SYNCED)) {
5567 tp->serdes_counter--;
5568 current_link_up = true;
5573 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5574 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5576 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5578 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5579 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5580 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5581 MAC_STATUS_SIGNAL_DET)) {
5582 sg_dig_status = tr32(SG_DIG_STATUS);
5583 mac_status = tr32(MAC_STATUS);
5585 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5586 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5587 u32 local_adv = 0, remote_adv = 0;
5589 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5590 local_adv |= ADVERTISE_1000XPAUSE;
5591 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5592 local_adv |= ADVERTISE_1000XPSE_ASYM;
5594 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5595 remote_adv |= LPA_1000XPAUSE;
5596 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5597 remote_adv |= LPA_1000XPAUSE_ASYM;
5599 tp->link_config.rmt_adv =
5600 mii_adv_to_ethtool_adv_x(remote_adv);
5602 tg3_setup_flow_control(tp, local_adv, remote_adv);
5603 current_link_up = true;
5604 tp->serdes_counter = 0;
5605 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5606 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5607 if (tp->serdes_counter)
5608 tp->serdes_counter--;
5611 u32 val = serdes_cfg;
5618 tw32_f(MAC_SERDES_CFG, val);
5621 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5624 /* Link parallel detection: link is up only if we
5625  * have PCS_SYNC and are not receiving config code words.
5626  */
5627 mac_status = tr32(MAC_STATUS);
5628 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5629 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5630 tg3_setup_flow_control(tp, 0, 0);
5631 current_link_up = true;
5633 TG3_PHYFLG_PARALLEL_DETECT;
5634 tp->serdes_counter =
5635 SERDES_PARALLEL_DET_TIMEOUT;
5637 goto restart_autoneg;
5641 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5642 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5646 return current_link_up;
5649 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5651 bool current_link_up = false;
5653 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5656 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5657 u32 txflags, rxflags;
5660 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5661 u32 local_adv = 0, remote_adv = 0;
5663 if (txflags & ANEG_CFG_PS1)
5664 local_adv |= ADVERTISE_1000XPAUSE;
5665 if (txflags & ANEG_CFG_PS2)
5666 local_adv |= ADVERTISE_1000XPSE_ASYM;
5668 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5669 remote_adv |= LPA_1000XPAUSE;
5670 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5671 remote_adv |= LPA_1000XPAUSE_ASYM;
5673 tp->link_config.rmt_adv =
5674 mii_adv_to_ethtool_adv_x(remote_adv);
5676 tg3_setup_flow_control(tp, local_adv, remote_adv);
5678 current_link_up = true;
5680 for (i = 0; i < 30; i++) {
5683 (MAC_STATUS_SYNC_CHANGED |
5684 MAC_STATUS_CFG_CHANGED));
5686 if ((tr32(MAC_STATUS) &
5687 (MAC_STATUS_SYNC_CHANGED |
5688 MAC_STATUS_CFG_CHANGED)) == 0)
5692 mac_status = tr32(MAC_STATUS);
5693 if (!current_link_up &&
5694 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5695 !(mac_status & MAC_STATUS_RCVD_CFG))
5696 current_link_up = true;
5698 tg3_setup_flow_control(tp, 0, 0);
5700 /* Forcing 1000FD link up. */
5701 current_link_up = true;
5703 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5706 tw32_f(MAC_MODE, tp->mac_mode);
5711 return current_link_up;
5714 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5717 u16 orig_active_speed;
5718 u8 orig_active_duplex;
5720 bool current_link_up;
5723 orig_pause_cfg = tp->link_config.active_flowctrl;
5724 orig_active_speed = tp->link_config.active_speed;
5725 orig_active_duplex = tp->link_config.active_duplex;
5727 if (!tg3_flag(tp, HW_AUTONEG) &&
5729 tg3_flag(tp, INIT_COMPLETE)) {
5730 mac_status = tr32(MAC_STATUS);
5731 mac_status &= (MAC_STATUS_PCS_SYNCED |
5732 MAC_STATUS_SIGNAL_DET |
5733 MAC_STATUS_CFG_CHANGED |
5734 MAC_STATUS_RCVD_CFG);
5735 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5736 MAC_STATUS_SIGNAL_DET)) {
5737 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5738 MAC_STATUS_CFG_CHANGED));
5743 tw32_f(MAC_TX_AUTO_NEG, 0);
5745 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5746 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5747 tw32_f(MAC_MODE, tp->mac_mode);
5750 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5751 tg3_init_bcm8002(tp);
5753 /* Enable link change event even when serdes polling. */
5754 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5757 current_link_up = false;
5758 tp->link_config.rmt_adv = 0;
5759 mac_status = tr32(MAC_STATUS);
5761 if (tg3_flag(tp, HW_AUTONEG))
5762 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5764 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5766 tp->napi[0].hw_status->status =
5767 (SD_STATUS_UPDATED |
5768 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5770 for (i = 0; i < 100; i++) {
5771 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5772 MAC_STATUS_CFG_CHANGED));
5774 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5775 MAC_STATUS_CFG_CHANGED |
5776 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5780 mac_status = tr32(MAC_STATUS);
5781 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5782 current_link_up = false;
5783 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5784 tp->serdes_counter == 0) {
5785 tw32_f(MAC_MODE, (tp->mac_mode |
5786 MAC_MODE_SEND_CONFIGS));
5788 tw32_f(MAC_MODE, tp->mac_mode);
5792 if (current_link_up) {
5793 tp->link_config.active_speed = SPEED_1000;
5794 tp->link_config.active_duplex = DUPLEX_FULL;
5795 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5796 LED_CTRL_LNKLED_OVERRIDE |
5797 LED_CTRL_1000MBPS_ON));
5799 tp->link_config.active_speed = SPEED_UNKNOWN;
5800 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5801 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5802 LED_CTRL_LNKLED_OVERRIDE |
5803 LED_CTRL_TRAFFIC_OVERRIDE));
5806 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5807 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5808 if (orig_pause_cfg != now_pause_cfg ||
5809 orig_active_speed != tp->link_config.active_speed ||
5810 orig_active_duplex != tp->link_config.active_duplex)
5811 tg3_link_report(tp);
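/* Link setup for serdes devices that are driven through MII registers
 * (e.g. the 5714S/5780-class parts), including the SGMII mode detection
 * used on the 5719/5720.
 */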
5817 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5821 u16 current_speed = SPEED_UNKNOWN;
5822 u8 current_duplex = DUPLEX_UNKNOWN;
5823 bool current_link_up = false;
5824 u32 local_adv, remote_adv, sgsr;
5826 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5827 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5828 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5829 (sgsr & SERDES_TG3_SGMII_MODE)) {
5834 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5836 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5837 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5839 current_link_up = true;
5840 if (sgsr & SERDES_TG3_SPEED_1000) {
5841 current_speed = SPEED_1000;
5842 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5843 } else if (sgsr & SERDES_TG3_SPEED_100) {
5844 current_speed = SPEED_100;
5845 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5847 current_speed = SPEED_10;
5848 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5851 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5852 current_duplex = DUPLEX_FULL;
5854 current_duplex = DUPLEX_HALF;
5857 tw32_f(MAC_MODE, tp->mac_mode);
5860 tg3_clear_mac_status(tp);
5862 goto fiber_setup_done;
5865 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5866 tw32_f(MAC_MODE, tp->mac_mode);
5869 tg3_clear_mac_status(tp);
5874 tp->link_config.rmt_adv = 0;
5876 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5877 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5878 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5879 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5880 bmsr |= BMSR_LSTATUS;
5882 bmsr &= ~BMSR_LSTATUS;
5885 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5887 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5888 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5889 /* do nothing, just check for link up at the end */
5890 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5893 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5894 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5895 ADVERTISE_1000XPAUSE |
5896 ADVERTISE_1000XPSE_ASYM |
5899 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5900 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5902 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5903 tg3_writephy(tp, MII_ADVERTISE, newadv);
5904 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5905 tg3_writephy(tp, MII_BMCR, bmcr);
5907 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5908 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5909 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5916 bmcr &= ~BMCR_SPEED1000;
5917 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5919 if (tp->link_config.duplex == DUPLEX_FULL)
5920 new_bmcr |= BMCR_FULLDPLX;
5922 if (new_bmcr != bmcr) {
5923 /* BMCR_SPEED1000 is a reserved bit that needs
5924 * to be set on write.
5926 new_bmcr |= BMCR_SPEED1000;
5928 /* Force a linkdown */
5932 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5933 adv &= ~(ADVERTISE_1000XFULL |
5934 ADVERTISE_1000XHALF |
5936 tg3_writephy(tp, MII_ADVERTISE, adv);
5937 tg3_writephy(tp, MII_BMCR, bmcr |
5941 tg3_carrier_off(tp);
5943 tg3_writephy(tp, MII_BMCR, new_bmcr);
5945 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5946 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5947 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5948 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5949 bmsr |= BMSR_LSTATUS;
5951 bmsr &= ~BMSR_LSTATUS;
5953 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5957 if (bmsr & BMSR_LSTATUS) {
5958 current_speed = SPEED_1000;
5959 current_link_up = true;
5960 if (bmcr & BMCR_FULLDPLX)
5961 current_duplex = DUPLEX_FULL;
5963 current_duplex = DUPLEX_HALF;
5968 if (bmcr & BMCR_ANENABLE) {
5971 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5972 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5973 common = local_adv & remote_adv;
5974 if (common & (ADVERTISE_1000XHALF |
5975 ADVERTISE_1000XFULL)) {
5976 if (common & ADVERTISE_1000XFULL)
5977 current_duplex = DUPLEX_FULL;
5979 current_duplex = DUPLEX_HALF;
5981 tp->link_config.rmt_adv =
5982 mii_adv_to_ethtool_adv_x(remote_adv);
5983 } else if (!tg3_flag(tp, 5780_CLASS)) {
5984 /* Link is up via parallel detect */
5986 current_link_up = false;
5992 if (current_link_up && current_duplex == DUPLEX_FULL)
5993 tg3_setup_flow_control(tp, local_adv, remote_adv);
5995 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5996 if (tp->link_config.active_duplex == DUPLEX_HALF)
5997 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5999 tw32_f(MAC_MODE, tp->mac_mode);
6002 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6004 tp->link_config.active_speed = current_speed;
6005 tp->link_config.active_duplex = current_duplex;
6007 tg3_test_and_report_link_chg(tp, current_link_up);
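/* Ticked from the periodic driver timer: if the serdes sees signal but
 * no config code words, force the link up by parallel detection; if
 * config code words appear later, hand control back to autoneg.
 */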
6011 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6013 if (tp->serdes_counter) {
6014 /* Give autoneg time to complete. */
6015 tp->serdes_counter--;
6020 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6023 tg3_readphy(tp, MII_BMCR, &bmcr);
6024 if (bmcr & BMCR_ANENABLE) {
6027 /* Select shadow register 0x1f */
6028 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6029 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6031 /* Select expansion interrupt status register */
6032 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6033 MII_TG3_DSP_EXP1_INT_STAT);
6034 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6035 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6037 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6038 /* We have signal detect and are not receiving
6039  * config code words, so the link is up by parallel
6043 bmcr &= ~BMCR_ANENABLE;
6044 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6045 tg3_writephy(tp, MII_BMCR, bmcr);
6046 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6049 } else if (tp->link_up &&
6050 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6051 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6054 /* Select expansion interrupt status register */
6055 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6056 MII_TG3_DSP_EXP1_INT_STAT);
6057 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6061 /* Config code words received, turn on autoneg. */
6062 tg3_readphy(tp, MII_BMCR, &bmcr);
6063 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6065 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6071 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6076 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6077 err = tg3_setup_fiber_phy(tp, force_reset);
6078 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6079 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6081 err = tg3_setup_copper_phy(tp, force_reset);
6083 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6086 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6087 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6089 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6094 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6095 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6096 tw32(GRC_MISC_CFG, val);
6099 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6100 (6 << TX_LENGTHS_IPG_SHIFT);
6101 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6102 tg3_asic_rev(tp) == ASIC_REV_5762)
6103 val |= tr32(MAC_TX_LENGTHS) &
6104 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6105 TX_LENGTHS_CNT_DWN_VAL_MSK);
6107 if (tp->link_config.active_speed == SPEED_1000 &&
6108 tp->link_config.active_duplex == DUPLEX_HALF)
6109 tw32(MAC_TX_LENGTHS, val |
6110 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6112 tw32(MAC_TX_LENGTHS, val |
6113 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6115 if (!tg3_flag(tp, 5705_PLUS)) {
6117 tw32(HOSTCC_STAT_COAL_TICKS,
6118 tp->coal.stats_block_coalesce_usecs);
6120 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6124 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6125 val = tr32(PCIE_PWR_MGMT_THRESH);
6127 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6130 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6131 tw32(PCIE_PWR_MGMT_THRESH, val);
6137 /* tp->lock must be held */
6138 static u64 tg3_refclk_read(struct tg3 *tp)
6140 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6141 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6144 /* tp->lock must be held */
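/* The 64-bit reference clock cannot be written atomically, so stop the
 * counter, load both 32-bit halves, then let it resume.
 */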
6145 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6147 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6149 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6150 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6151 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6152 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6155 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6156 static inline void tg3_full_unlock(struct tg3 *tp);
6157 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6159 struct tg3 *tp = netdev_priv(dev);
6161 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6162 SOF_TIMESTAMPING_RX_SOFTWARE |
6163 SOF_TIMESTAMPING_SOFTWARE;
6165 if (tg3_flag(tp, PTP_CAPABLE)) {
6166 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6167 SOF_TIMESTAMPING_RX_HARDWARE |
6168 SOF_TIMESTAMPING_RAW_HARDWARE;
6172 info->phc_index = ptp_clock_index(tp->ptp_clock);
6174 info->phc_index = -1;
6176 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6178 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6179 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6180 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6181 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6185 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6187 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6188 bool neg_adj = false;
6196 /* Frequency adjustment is performed using hardware with a 24 bit
6197 * accumulator and a programmable correction value. On each clk, the
6198 * correction value gets added to the accumulator and when it
6199 * overflows, the time counter is incremented/decremented.
6201 * So conversion from ppb to correction value is
6202 * ppb * (1 << 24) / 1000000000
6204 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6205 TG3_EAV_REF_CLK_CORRECT_MASK;
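/* E.g. a requested adjustment of 1000 ppb (one part per million) gives
 * correction = 1000 * (1 << 24) / 10^9 ~= 16, so the 24-bit accumulator
 * overflows about once every 2^24 / 16 ~= 10^6 clocks, adding roughly
 * one extra count per million.
 */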
6207 tg3_full_lock(tp, 0);
6210 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6211 TG3_EAV_REF_CLK_CORRECT_EN |
6212 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6214 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6216 tg3_full_unlock(tp);
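/* Stepping the PTP clock never touches the hardware counter; the signed
 * offset accumulates in tp->ptp_adjust and is folded in at read time by
 * tg3_ptp_gettime().
 */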
6221 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6223 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6225 tg3_full_lock(tp, 0);
6226 tp->ptp_adjust += delta;
6227 tg3_full_unlock(tp);
6232 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6235 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6237 tg3_full_lock(tp, 0);
6238 ns = tg3_refclk_read(tp);
6239 ns += tp->ptp_adjust;
6240 tg3_full_unlock(tp);
6242 *ts = ns_to_timespec64(ns);
6247 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6248 const struct timespec64 *ts)
6251 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6253 ns = timespec64_to_ns(ts);
6255 tg3_full_lock(tp, 0);
6256 tg3_refclk_write(tp, ns);
6258 tg3_full_unlock(tp);
6263 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6264 struct ptp_clock_request *rq, int on)
6266 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6271 case PTP_CLK_REQ_PEROUT:
6272 if (rq->perout.index != 0)
6275 tg3_full_lock(tp, 0);
6276 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6277 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6282 nsec = rq->perout.start.sec * 1000000000ULL +
6283 rq->perout.start.nsec;
6285 if (rq->perout.period.sec || rq->perout.period.nsec) {
6286 netdev_warn(tp->dev,
6287 "Device supports only a one-shot timesync output, period must be 0\n");
6292 if (nsec & (1ULL << 63)) {
6293 netdev_warn(tp->dev,
6294 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6299 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6300 tw32(TG3_EAV_WATCHDOG0_MSB,
6301 TG3_EAV_WATCHDOG0_EN |
6302 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6304 tw32(TG3_EAV_REF_CLCK_CTL,
6305 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6307 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6308 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6312 tg3_full_unlock(tp);
6322 static const struct ptp_clock_info tg3_ptp_caps = {
6323 .owner = THIS_MODULE,
6324 .name = "tg3 clock",
6325 .max_adj = 250000000,
6331 .adjfreq = tg3_ptp_adjfreq,
6332 .adjtime = tg3_ptp_adjtime,
6333 .gettime64 = tg3_ptp_gettime,
6334 .settime64 = tg3_ptp_settime,
6335 .enable = tg3_ptp_enable,
6338 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6339 struct skb_shared_hwtstamps *timestamp)
6341 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6342 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6346 /* tp->lock must be held */
6347 static void tg3_ptp_init(struct tg3 *tp)
6349 if (!tg3_flag(tp, PTP_CAPABLE))
6352 /* Initialize the hardware clock to the system time. */
6353 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6355 tp->ptp_info = tg3_ptp_caps;
6358 /* tp->lock must be held */
6359 static void tg3_ptp_resume(struct tg3 *tp)
6361 if (!tg3_flag(tp, PTP_CAPABLE))
6364 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6368 static void tg3_ptp_fini(struct tg3 *tp)
6370 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6373 ptp_clock_unregister(tp->ptp_clock);
6374 tp->ptp_clock = NULL;
6378 static inline int tg3_irq_sync(struct tg3 *tp)
6380 return tp->irq_sync;
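/* Copy @len bytes of registers starting at register offset @off into the
 * dump buffer.  @dst is biased by @off first, so each register block
 * lands at its natural offset within the buffer.
 */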
6383 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6387 dst = (u32 *)((u8 *)dst + off);
6388 for (i = 0; i < len; i += sizeof(u32))
6389 *dst++ = tr32(off + i);
6392 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6394 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6395 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6396 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6397 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6398 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6399 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6400 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6401 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6402 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6403 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6404 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6405 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6406 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6407 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6408 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6409 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6410 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6411 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6412 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6414 if (tg3_flag(tp, SUPPORT_MSIX))
6415 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6417 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6418 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6419 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6420 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6421 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6422 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6423 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6424 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6426 if (!tg3_flag(tp, 5705_PLUS)) {
6427 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6428 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6429 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6432 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6433 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6434 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6435 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6436 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6438 if (tg3_flag(tp, NVRAM))
6439 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
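/* Dump the register file and each vector's status block and NAPI state
 * to the kernel log for post-mortem debugging; register groups that read
 * all zero are skipped.
 */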
6442 static void tg3_dump_state(struct tg3 *tp)
6447 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6451 if (tg3_flag(tp, PCI_EXPRESS)) {
6452 /* Read up to but not including private PCI registers */
6453 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6454 regs[i / sizeof(u32)] = tr32(i);
6456 tg3_dump_legacy_regs(tp, regs);
6458 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6459 if (!regs[i + 0] && !regs[i + 1] &&
6460 !regs[i + 2] && !regs[i + 3])
6463 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6465 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6470 for (i = 0; i < tp->irq_cnt; i++) {
6471 struct tg3_napi *tnapi = &tp->napi[i];
6473 /* SW status block */
6475 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6477 tnapi->hw_status->status,
6478 tnapi->hw_status->status_tag,
6479 tnapi->hw_status->rx_jumbo_consumer,
6480 tnapi->hw_status->rx_consumer,
6481 tnapi->hw_status->rx_mini_consumer,
6482 tnapi->hw_status->idx[0].rx_producer,
6483 tnapi->hw_status->idx[0].tx_consumer);
6486 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6488 tnapi->last_tag, tnapi->last_irq_tag,
6489 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6491 tnapi->prodring.rx_std_prod_idx,
6492 tnapi->prodring.rx_std_cons_idx,
6493 tnapi->prodring.rx_jmb_prod_idx,
6494 tnapi->prodring.rx_jmb_cons_idx);
6498 /* This is called whenever we suspect that the system chipset is re-
6499 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6500 * is bogus tx completions. We try to recover by setting the
6501 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6504 static void tg3_tx_recover(struct tg3 *tp)
6506 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6507 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6509 netdev_warn(tp->dev,
6510 "The system may be re-ordering memory-mapped I/O "
6511 "cycles to the network device, attempting to recover. "
6512 "Please report the problem to the driver maintainer "
6513 "and include system chipset information.\n");
6515 tg3_flag_set(tp, TX_RECOVERY_PENDING);
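/* Number of free tx descriptors.  The producer/consumer distance is
 * taken modulo the power-of-two ring size; e.g. assuming the usual
 * 512-entry ring, tx_prod = 5 and tx_cons = 510 gives
 * (5 - 510) & 511 = 7 descriptors still in flight.
 */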
6518 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6520 /* Tell compiler to fetch tx indices from memory. */
6522 return tnapi->tx_pending -
6523 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6526 /* Tigon3 never reports partial packet sends. So we do not
6527 * need special logic to handle SKBs that have not had all
6528 * of their frags sent yet, like SunGEM does.
6530 static void tg3_tx(struct tg3_napi *tnapi)
6532 struct tg3 *tp = tnapi->tp;
6533 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6534 u32 sw_idx = tnapi->tx_cons;
6535 struct netdev_queue *txq;
6536 int index = tnapi - tp->napi;
6537 unsigned int pkts_compl = 0, bytes_compl = 0;
6539 if (tg3_flag(tp, ENABLE_TSS))
6542 txq = netdev_get_tx_queue(tp->dev, index);
6544 while (sw_idx != hw_idx) {
6545 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6546 struct sk_buff *skb = ri->skb;
6549 if (unlikely(skb == NULL)) {
6554 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6555 struct skb_shared_hwtstamps timestamp;
6556 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6557 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6559 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6561 skb_tstamp_tx(skb, &timestamp);
6564 pci_unmap_single(tp->pdev,
6565 dma_unmap_addr(ri, mapping),
6571 while (ri->fragmented) {
6572 ri->fragmented = false;
6573 sw_idx = NEXT_TX(sw_idx);
6574 ri = &tnapi->tx_buffers[sw_idx];
6577 sw_idx = NEXT_TX(sw_idx);
6579 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6580 ri = &tnapi->tx_buffers[sw_idx];
6581 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6584 pci_unmap_page(tp->pdev,
6585 dma_unmap_addr(ri, mapping),
6586 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6589 while (ri->fragmented) {
6590 ri->fragmented = false;
6591 sw_idx = NEXT_TX(sw_idx);
6592 ri = &tnapi->tx_buffers[sw_idx];
6595 sw_idx = NEXT_TX(sw_idx);
6599 bytes_compl += skb->len;
6601 dev_consume_skb_any(skb);
6603 if (unlikely(tx_bug)) {
6609 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6611 tnapi->tx_cons = sw_idx;
6613 /* Need to make the tx_cons update visible to tg3_start_xmit()
6614 * before checking for netif_queue_stopped(). Without the
6615 * memory barrier, there is a small possibility that tg3_start_xmit()
6616 * will miss it and cause the queue to be stopped forever.
6620 if (unlikely(netif_tx_queue_stopped(txq) &&
6621 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6622 __netif_tx_lock(txq, smp_processor_id());
6623 if (netif_tx_queue_stopped(txq) &&
6624 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6625 netif_tx_wake_queue(txq);
6626 __netif_tx_unlock(txq);
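/* Release a buffer allocated by tg3_alloc_rx_data(): page-frag backed
 * buffers return to the frag allocator, larger buffers were kmalloc'd.
 */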
6630 static void tg3_frag_free(bool is_frag, void *data)
6633 skb_free_frag(data);
6638 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6640 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6641 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6646 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6647 map_sz, PCI_DMA_FROMDEVICE);
6648 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6653 /* Returns size of skb allocated or < 0 on error.
6655 * We only need to fill in the address because the other members
6656 * of the RX descriptor are invariant, see tg3_init_rings.
6658 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6659 * posting buffers we only dirty the first cache line of the RX
6660 * descriptor (containing the address). Whereas for the RX status
6661 * buffers the cpu only reads the last cacheline of the RX descriptor
6662 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6664 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6665 u32 opaque_key, u32 dest_idx_unmasked,
6666 unsigned int *frag_size)
6668 struct tg3_rx_buffer_desc *desc;
6669 struct ring_info *map;
6672 int skb_size, data_size, dest_idx;
6674 switch (opaque_key) {
6675 case RXD_OPAQUE_RING_STD:
6676 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6677 desc = &tpr->rx_std[dest_idx];
6678 map = &tpr->rx_std_buffers[dest_idx];
6679 data_size = tp->rx_pkt_map_sz;
6682 case RXD_OPAQUE_RING_JUMBO:
6683 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6684 desc = &tpr->rx_jmb[dest_idx].std;
6685 map = &tpr->rx_jmb_buffers[dest_idx];
6686 data_size = TG3_RX_JMB_MAP_SZ;
6693 /* Do not overwrite any of the map or rp information
6694 * until we are sure we can commit to a new buffer.
6696 * Callers depend upon this behavior and assume that
6697 * we leave everything unchanged if we fail.
6699 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6700 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
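/* Buffers that fit in a page come from the cheap page-frag cache;
 * jumbo-sized buffers fall back to kmalloc.
 */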
6701 if (skb_size <= PAGE_SIZE) {
6702 data = netdev_alloc_frag(skb_size);
6703 *frag_size = skb_size;
6705 data = kmalloc(skb_size, GFP_ATOMIC);
6711 mapping = pci_map_single(tp->pdev,
6712 data + TG3_RX_OFFSET(tp),
6714 PCI_DMA_FROMDEVICE);
6715 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6716 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6721 dma_unmap_addr_set(map, mapping, mapping);
6723 desc->addr_hi = ((u64)mapping >> 32);
6724 desc->addr_lo = ((u64)mapping & 0xffffffff);
6729 /* We only need to move the address over because the other
6730 * members of the RX descriptor are invariant. See notes above
6731 * tg3_alloc_rx_data for full details.
6733 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6734 struct tg3_rx_prodring_set *dpr,
6735 u32 opaque_key, int src_idx,
6736 u32 dest_idx_unmasked)
6738 struct tg3 *tp = tnapi->tp;
6739 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6740 struct ring_info *src_map, *dest_map;
6741 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6744 switch (opaque_key) {
6745 case RXD_OPAQUE_RING_STD:
6746 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6747 dest_desc = &dpr->rx_std[dest_idx];
6748 dest_map = &dpr->rx_std_buffers[dest_idx];
6749 src_desc = &spr->rx_std[src_idx];
6750 src_map = &spr->rx_std_buffers[src_idx];
6753 case RXD_OPAQUE_RING_JUMBO:
6754 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6755 dest_desc = &dpr->rx_jmb[dest_idx].std;
6756 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6757 src_desc = &spr->rx_jmb[src_idx].std;
6758 src_map = &spr->rx_jmb_buffers[src_idx];
6765 dest_map->data = src_map->data;
6766 dma_unmap_addr_set(dest_map, mapping,
6767 dma_unmap_addr(src_map, mapping));
6768 dest_desc->addr_hi = src_desc->addr_hi;
6769 dest_desc->addr_lo = src_desc->addr_lo;
6771 /* Ensure that the update to the skb happens after the physical
6772 * addresses have been transferred to the new BD location.
6776 src_map->data = NULL;
6779 /* The RX ring scheme is composed of multiple rings which post fresh
6780 * buffers to the chip, and one special ring the chip uses to report
6781 * status back to the host.
6783 * The special ring reports the status of received packets to the
6784 * host. The chip does not write into the original descriptor the
6785 * RX buffer was obtained from. The chip simply takes the original
6786 * descriptor as provided by the host, updates the status and length
6787 * field, then writes this into the next status ring entry.
6789 * Each ring the host uses to post buffers to the chip is described
6790 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6791 * it is first placed into the on-chip ram. When the packet's length
6792 * is known, it walks down the TG3_BDINFO entries to select the ring.
6793 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6794 * which is within the range of the new packet's length is chosen.
6796 * The "separate ring for rx status" scheme may sound queer, but it makes
6797 * sense from a cache coherency perspective. If only the host writes
6798 * to the buffer post rings, and only the chip writes to the rx status
6799 * rings, then cache lines never move beyond shared-modified state.
6800 * If both the host and chip were to write into the same ring, cache line
6801 * eviction could occur since both entities want it in an exclusive state.
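/* To make the flow above concrete: the host posts a buffer on the
 * standard ring and remembers it in rx_std_buffers[]; the chip DMAs a
 * received frame into that buffer and writes a completion, carrying the
 * opaque cookie, into the status return ring; tg3_rx() then decodes the
 * cookie's ring type and index to recover the original ring_info.
 */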
6803 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6805 struct tg3 *tp = tnapi->tp;
6806 u32 work_mask, rx_std_posted = 0;
6807 u32 std_prod_idx, jmb_prod_idx;
6808 u32 sw_idx = tnapi->rx_rcb_ptr;
6811 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6813 hw_idx = *(tnapi->rx_rcb_prod_idx);
6815 * We need to order the read of hw_idx and the read of
6816 * the opaque cookie.
6821 std_prod_idx = tpr->rx_std_prod_idx;
6822 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6823 while (sw_idx != hw_idx && budget > 0) {
6824 struct ring_info *ri;
6825 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6827 struct sk_buff *skb;
6828 dma_addr_t dma_addr;
6829 u32 opaque_key, desc_idx, *post_ptr;
6833 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6834 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6835 if (opaque_key == RXD_OPAQUE_RING_STD) {
6836 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6837 dma_addr = dma_unmap_addr(ri, mapping);
6839 post_ptr = &std_prod_idx;
6841 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6842 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6843 dma_addr = dma_unmap_addr(ri, mapping);
6845 post_ptr = &jmb_prod_idx;
6847 goto next_pkt_nopost;
6849 work_mask |= opaque_key;
6851 if (desc->err_vlan & RXD_ERR_MASK) {
6853 tg3_recycle_rx(tnapi, tpr, opaque_key,
6854 desc_idx, *post_ptr);
6856 /* Other statistics are tracked by the card itself. */
6861 prefetch(data + TG3_RX_OFFSET(tp));
6862 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6865 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6866 RXD_FLAG_PTPSTAT_PTPV1 ||
6867 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6868 RXD_FLAG_PTPSTAT_PTPV2) {
6869 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6870 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
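/* Copybreak: large frames get a fresh buffer posted in their place and
 * the original buffer is handed to the stack via build_skb(); small
 * frames are cheaper to copy into a new skb so the original buffer can
 * be recycled straight back onto the producer ring.
 */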
6873 if (len > TG3_RX_COPY_THRESH(tp)) {
6875 unsigned int frag_size;
6877 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6878 *post_ptr, &frag_size);
6882 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6883 PCI_DMA_FROMDEVICE);
6885 /* Ensure that the update to the data happens
6886 * after the usage of the old DMA mapping.
6892 skb = build_skb(data, frag_size);
6894 tg3_frag_free(frag_size != 0, data);
6895 goto drop_it_no_recycle;
6897 skb_reserve(skb, TG3_RX_OFFSET(tp));
6899 tg3_recycle_rx(tnapi, tpr, opaque_key,
6900 desc_idx, *post_ptr);
6902 skb = netdev_alloc_skb(tp->dev,
6903 len + TG3_RAW_IP_ALIGN);
6905 goto drop_it_no_recycle;
6907 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6908 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6910 data + TG3_RX_OFFSET(tp),
6912 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6917 tg3_hwclock_to_timestamp(tp, tstamp,
6918 skb_hwtstamps(skb));
6920 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6921 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6922 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6923 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6924 skb->ip_summed = CHECKSUM_UNNECESSARY;
6926 skb_checksum_none_assert(skb);
6928 skb->protocol = eth_type_trans(skb, tp->dev);
6930 if (len > (tp->dev->mtu + ETH_HLEN) &&
6931 skb->protocol != htons(ETH_P_8021Q) &&
6932 skb->protocol != htons(ETH_P_8021AD)) {
6933 dev_kfree_skb_any(skb);
6934 goto drop_it_no_recycle;
6937 if (desc->type_flags & RXD_FLAG_VLAN &&
6938 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6939 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6940 desc->err_vlan & RXD_VLAN_MASK);
6942 napi_gro_receive(&tnapi->napi, skb);
6950 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6951 tpr->rx_std_prod_idx = std_prod_idx &
6952 tp->rx_std_ring_mask;
6953 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6954 tpr->rx_std_prod_idx);
6955 work_mask &= ~RXD_OPAQUE_RING_STD;
6960 sw_idx &= tp->rx_ret_ring_mask;
6962 /* Refresh hw_idx to see if there is new work */
6963 if (sw_idx == hw_idx) {
6964 hw_idx = *(tnapi->rx_rcb_prod_idx);
6969 /* ACK the status ring. */
6970 tnapi->rx_rcb_ptr = sw_idx;
6971 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6973 /* Refill RX ring(s). */
6974 if (!tg3_flag(tp, ENABLE_RSS)) {
6975 /* Sync BD data before updating mailbox */
6978 if (work_mask & RXD_OPAQUE_RING_STD) {
6979 tpr->rx_std_prod_idx = std_prod_idx &
6980 tp->rx_std_ring_mask;
6981 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6982 tpr->rx_std_prod_idx);
6984 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6985 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6986 tp->rx_jmb_ring_mask;
6987 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6988 tpr->rx_jmb_prod_idx);
6991 } else if (work_mask) {
6992 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6993 * updated before the producer indices can be updated.
6997 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6998 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7000 if (tnapi != &tp->napi[1]) {
7001 tp->rx_refill = true;
7002 napi_schedule(&tp->napi[1].napi);
7009 static void tg3_poll_link(struct tg3 *tp)
7011 /* handle link change and other phy events */
7012 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7013 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7015 if (sblk->status & SD_STATUS_LINK_CHG) {
7016 sblk->status = SD_STATUS_UPDATED |
7017 (sblk->status & ~SD_STATUS_LINK_CHG);
7018 spin_lock(&tp->lock);
7019 if (tg3_flag(tp, USE_PHYLIB)) {
7021 (MAC_STATUS_SYNC_CHANGED |
7022 MAC_STATUS_CFG_CHANGED |
7023 MAC_STATUS_MI_COMPLETION |
7024 MAC_STATUS_LNKSTATE_CHANGED));
7027 tg3_setup_phy(tp, false);
7028 spin_unlock(&tp->lock);
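/* With RSS, each rx vector refills buffers into its own producer set.
 * Transfer freshly posted standard and jumbo buffers from @spr to the
 * hardware-visible set @dpr, stopping early (and returning an error)
 * if a destination slot is still occupied.
 */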
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}

static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}

static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}

static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete_done(napi, work_done);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

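/* Illustrative note (editor's addition): the status tag is bumped by the
 * hardware each time it posts a new status block.  Writing the tag the
 * driver last saw into bits 31:24 of the interrupt mailbox both re-enables
 * the vector and acknowledges work up to that tag; if the chip has already
 * produced a newer tag, it raises the interrupt again, e.g.:
 *
 *	tnapi->last_tag = 0x2c;
 *	tw32_mailbox(tnapi->int_mbox, 0x2c << 24);	// ack through tag 0x2c
 */
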
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}

static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete_done(napi, work_done);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_trans_update(tp->dev);	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	spin_unlock_bh(&tp->lock);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	spin_lock_bh(&tp->lock);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

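/* Worked example (editor's addition): suppose this device shares its INTx
 * line with another, screaming device.  The handler runs, but
 * sblk->status_tag still equals last_irq_tag and PCISTATE says the
 * interrupt is not ours, so we return IRQ_RETVAL(0).  After enough
 * unhandled returns, the kernel's spurious-IRQ logic disables the line,
 * which is the "eventually they will be silenced" behaviour the comment
 * above relies on.
 */
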
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return base + len + 8 < base;
}

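/* Worked example (editor's addition): the unsigned wrap-around above flags
 * any buffer whose end (plus the 8 bytes of slack the test allows for)
 * lands across a 4GB boundary:
 *
 *	base = 0xffffff00, len = 0x200
 *	base + len + 8 = 0x1_00000108, truncated to u32 -> 0x00000108 < base
 *
 * so the test reports an overflow and the caller falls back to the bounce
 * buffer workaround.
 */
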
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}

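/* Illustrative sketch (editor's addition): a 1514-byte frame mapped at DMA
 * address 0x1_2345_6000, sent with TXD_FLAG_END, no TSO and no VLAN tag,
 * would be packed by the helper above as:
 *
 *	txbd->addr_hi   = 0x00000001;
 *	txbd->addr_lo   = 0x23456000;
 *	txbd->len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *	txbd->vlan_tag  = 0;
 */
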
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}

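/* Worked example (editor's addition): with tp->dma_limit = 4096, a
 * 9000-byte mapping is emitted as descriptors of 4096, 4096 and 808 bytes.
 * If a split would leave a tail of 8 bytes or less (the SHORT_DMA_BUG
 * case), the current chunk is shortened to dma_limit/2 so the remainder
 * grows past 8 bytes: len = 4100 becomes 2048 + 2052 rather than 4096 + 4.
 */
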
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb_any(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb_any(new_skb);
				ret = -1;
			}
		}
	}

	dev_consume_skb_any(skb);
	*pskb = new_skb;
	return ret;
}

static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
{
	/* Check if we will never have enough descriptors,
	 * as gso_segs can be more than current ring size
	 */
	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to work around all TSO packets that meet HW bug conditions
 * indicated in tg3_tx_frag_set()
 */
static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
		       struct netdev_queue *txq, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_tx_wake_queue(txq);
	}

	segs = skb_gso_segment(skb, tp->dev->features &
				    ~(NETIF_F_TSO | NETIF_F_TSO6));
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}

/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;
	struct iphdr *iph = NULL;
	struct tcphdr *tcph = NULL;
	__sum16 tcp_csum = 0, ip_csum = 0;
	__be16 ip_tot_len = 0;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		u32 tcp_opt_len, hdr_len;

		if (skb_cow_head(skb, 0))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		/* HW/FW can not correctly segment packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (tg3_tso_bug_gso_check(tnapi, skb))
				return tg3_tso_bug(tp, tnapi, txq, skb);
			goto drop;
		}

		if (!skb_is_gso_v6(skb)) {
			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			    tg3_flag(tp, TSO_BUG)) {
				if (tg3_tso_bug_gso_check(tnapi, skb))
					return tg3_tso_bug(tp, tnapi, txq, skb);
				goto drop;
			}
			ip_csum = iph->check;
			ip_tot_len = iph->tot_len;
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcph = tcp_hdr(skb);
		tcp_csum = tcph->check;

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcph->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else {
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		}

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW can not correctly checksum packets that have been
		 * vlan encapsulated.
		 */
		if (skb->protocol == htons(ETH_P_8021Q) ||
		    skb->protocol == htons(ETH_P_8021AD)) {
			if (skb_checksum_help(skb))
				goto drop;
		} else {
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (skb_vlan_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = skb_vlan_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
		mmiowb();
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

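/* Editor's note on the stop/wake protocol above (illustrative): the
 * producer stops the queue *before* re-reading tg3_tx_avail(), while the
 * consumer in tg3_tx() updates the tx index *before* testing
 * netif_tx_queue_stopped().  The smp_mb() pairs those two orders so at
 * least one side always observes the other's update; without it a
 * completion could slip between the "ring full" check and the stop,
 * leaving the queue stopped with no one left to wake it.
 */
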
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

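/* Illustrative note (editor's addition): the "opaque" field initialized
 * above is how a completion is matched back to its producer-ring slot.
 * For standard ring entry 5 it reads
 * RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT); the RX path masks
 * the index back out of the returned descriptor to locate the matching
 * rx_std_buffers[] entry.
 */
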
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 * 1. under rtnl_lock
	 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts. With link
	 * aware mode, the clock speed could be slow and bootcode does not
	 * complete within the expected time. Override the clock to allow the
	 * bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix tx timeout issue for 5762
	 * with 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}

/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

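/* Illustrative note (editor's addition): the per-vector coalescing
 * registers sit at a fixed 0x18-byte stride, so vector n's tx ticks
 * register is HOSTCC_TXCOL_TICKS_VEC1 + (n - 1) * 0x18.  That is exactly
 * what the loops in tg3_coal_tx_init()/tg3_coal_rx_init() compute with
 * their zero-based i, e.g. i = 1 programs the registers for vector 2.
 */
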
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}

9595 /* tp->lock is held. */
9596 static void tg3_rings_reset(struct tg3 *tp)
9600 struct tg3_napi *tnapi = &tp->napi[0];
9602 tg3_tx_rcbs_disable(tp);
9604 tg3_rx_ret_rcbs_disable(tp);
9606 /* Disable interrupts */
9607 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9608 tp->napi[0].chk_msi_cnt = 0;
9609 tp->napi[0].last_rx_cons = 0;
9610 tp->napi[0].last_tx_cons = 0;
9612 /* Zero mailbox registers. */
9613 if (tg3_flag(tp, SUPPORT_MSIX)) {
9614 for (i = 1; i < tp->irq_max; i++) {
9615 tp->napi[i].tx_prod = 0;
9616 tp->napi[i].tx_cons = 0;
9617 if (tg3_flag(tp, ENABLE_TSS))
9618 tw32_mailbox(tp->napi[i].prodmbox, 0);
9619 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9620 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9621 tp->napi[i].chk_msi_cnt = 0;
9622 tp->napi[i].last_rx_cons = 0;
9623 tp->napi[i].last_tx_cons = 0;
9625 if (!tg3_flag(tp, ENABLE_TSS))
9626 tw32_mailbox(tp->napi[0].prodmbox, 0);
9628 tp->napi[0].tx_prod = 0;
9629 tp->napi[0].tx_cons = 0;
9630 tw32_mailbox(tp->napi[0].prodmbox, 0);
9631 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9634 /* Make sure the NIC-based send BD rings are disabled. */
9635 if (!tg3_flag(tp, 5705_PLUS)) {
9636 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9637 for (i = 0; i < 16; i++)
9638 tw32_tx_mbox(mbox + i * 8, 0);
9641 /* Clear status block in ram. */
9642 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9644 /* Set status block DMA address */
9645 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9646 ((u64) tnapi->status_mapping >> 32));
9647 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9648 ((u64) tnapi->status_mapping & 0xffffffff));
9650 stblk = HOSTCC_STATBLCK_RING1;
9652 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9653 u64 mapping = (u64)tnapi->status_mapping;
9654 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9655 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9658 /* Clear status block in ram. */
9659 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9662 tg3_tx_rcbs_init(tp);
9663 tg3_rx_ret_rcbs_init(tp);
9666 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9668 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9670 if (!tg3_flag(tp, 5750_PLUS) ||
9671 tg3_flag(tp, 5780_CLASS) ||
9672 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9673 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9674 tg3_flag(tp, 57765_PLUS))
9675 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9676 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9677 tg3_asic_rev(tp) == ASIC_REV_5787)
9678 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9680 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9682 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9683 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9685 val = min(nic_rep_thresh, host_rep_thresh);
9686 tw32(RCVBDI_STD_THRESH, val);
9688 if (tg3_flag(tp, 57765_PLUS))
9689 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9691 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9694 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9696 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9698 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9699 tw32(RCVBDI_JUMBO_THRESH, val);
9701 if (tg3_flag(tp, 57765_PLUS))
9702 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9705 static inline u32 calc_crc(unsigned char *buf, int len)
9713 for (j = 0; j < len; j++) {
9716 for (k = 0; k < 8; k++) {
9722 reg ^= CRC32_POLY_LE;
9729 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9731 /* accept or reject all multicast frames */
9732 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9733 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9734 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9735 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9738 static void __tg3_set_rx_mode(struct net_device *dev)
9740 struct tg3 *tp = netdev_priv(dev);
9743 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9744 RX_MODE_KEEP_VLAN_TAG);
9746 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9747 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9750 if (!tg3_flag(tp, ENABLE_ASF))
9751 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9754 if (dev->flags & IFF_PROMISC) {
9755 /* Promiscuous mode. */
9756 rx_mode |= RX_MODE_PROMISC;
9757 } else if (dev->flags & IFF_ALLMULTI) {
9758 /* Accept all multicast. */
9759 tg3_set_multi(tp, 1);
9760 } else if (netdev_mc_empty(dev)) {
9761 /* Reject all multicast. */
9762 tg3_set_multi(tp, 0);
9764 /* Accept one or more multicast(s). */
9765 struct netdev_hw_addr *ha;
9766 u32 mc_filter[4] = { 0, };
9771 netdev_for_each_mc_addr(ha, dev) {
9772 crc = calc_crc(ha->addr, ETH_ALEN);
9774 regidx = (bit & 0x60) >> 5;
9776 mc_filter[regidx] |= (1 << bit);
9779 tw32(MAC_HASH_REG_0, mc_filter[0]);
9780 tw32(MAC_HASH_REG_1, mc_filter[1]);
9781 tw32(MAC_HASH_REG_2, mc_filter[2]);
9782 tw32(MAC_HASH_REG_3, mc_filter[3]);
9785 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9786 rx_mode |= RX_MODE_PROMISC;
9787 } else if (!(dev->flags & IFF_PROMISC)) {
9788 /* Add all entries into to the mac addr filter list */
9790 struct netdev_hw_addr *ha;
9792 netdev_for_each_uc_addr(ha, dev) {
9793 __tg3_set_one_mac_addr(tp, ha->addr,
9794 i + TG3_UCAST_ADDR_IDX(tp));
9799 if (rx_mode != tp->rx_mode) {
9800 tp->rx_mode = rx_mode;
9801 tw32_f(MAC_RX_MODE, rx_mode);
9806 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9810 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9811 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9814 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9818 if (!tg3_flag(tp, SUPPORT_MSIX))
9821 if (tp->rxq_cnt == 1) {
9822 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9826 /* Validate table against current IRQ count */
9827 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9828 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9832 if (i != TG3_RSS_INDIR_TBL_SIZE)
9833 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9836 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9839 u32 reg = MAC_RSS_INDIR_TBL_0;
9841 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9842 u32 val = tp->rss_ind_tbl[i];
9844 for (; i % 8; i++) {
9846 val |= tp->rss_ind_tbl[i];
9853 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9855 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9856 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9858 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9861 /* tp->lock is held. */
9862 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9864 u32 val, rdmac_mode;
9866 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9868 tg3_disable_ints(tp);
9872 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9874 if (tg3_flag(tp, INIT_COMPLETE))
9875 tg3_abort_hw(tp, 1);
9877 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9878 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9879 tg3_phy_pull_config(tp);
9880 tg3_eee_pull_config(tp, NULL);
9881 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9884 /* Enable MAC control of LPI */
9885 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9891 err = tg3_chip_reset(tp);
9895 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9897 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9898 val = tr32(TG3_CPMU_CTRL);
9899 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9900 tw32(TG3_CPMU_CTRL, val);
9902 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9903 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9904 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9905 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9907 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9908 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9909 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9910 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9912 val = tr32(TG3_CPMU_HST_ACC);
9913 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9914 val |= CPMU_HST_ACC_MACCLK_6_25;
9915 tw32(TG3_CPMU_HST_ACC, val);
9918 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9919 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9920 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9921 PCIE_PWR_MGMT_L1_THRESH_4MS;
9922 tw32(PCIE_PWR_MGMT_THRESH, val);
9924 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9925 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9927 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9929 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9930 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9933 if (tg3_flag(tp, L1PLLPD_EN)) {
9934 u32 grc_mode = tr32(GRC_MODE);
9936 /* Access the lower 1K of PL PCIE block registers. */
9937 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9938 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9940 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9941 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9942 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9944 tw32(GRC_MODE, grc_mode);
9947 if (tg3_flag(tp, 57765_CLASS)) {
9948 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9949 u32 grc_mode = tr32(GRC_MODE);
9951 /* Access the lower 1K of PL PCIE block registers. */
9952 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9953 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9955 val = tr32(TG3_PCIE_TLDLPL_PORT +
9956 TG3_PCIE_PL_LO_PHYCTL5);
9957 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9958 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9960 tw32(GRC_MODE, grc_mode);
9963 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9966 /* Fix transmit hangs */
9967 val = tr32(TG3_CPMU_PADRNG_CTL);
9968 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9969 tw32(TG3_CPMU_PADRNG_CTL, val);
9971 grc_mode = tr32(GRC_MODE);
9973 /* Access the lower 1K of DL PCIE block registers. */
9974 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9975 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9977 val = tr32(TG3_PCIE_TLDLPL_PORT +
9978 TG3_PCIE_DL_LO_FTSMAX);
9979 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9980 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9981 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9983 tw32(GRC_MODE, grc_mode);
9986 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9987 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9988 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9989 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9992 /* This works around an issue with Athlon chipsets on
9993 * B3 tigon3 silicon. This bit has no effect on any
9994 * other revision. But do not set this on PCI Express
9995 * chips and don't even touch the clocks if the CPMU is present.
9997 if (!tg3_flag(tp, CPMU_PRESENT)) {
9998 if (!tg3_flag(tp, PCI_EXPRESS))
9999 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10000 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10003 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10004 tg3_flag(tp, PCIX_MODE)) {
10005 val = tr32(TG3PCI_PCISTATE);
10006 val |= PCISTATE_RETRY_SAME_DMA;
10007 tw32(TG3PCI_PCISTATE, val);
10010 if (tg3_flag(tp, ENABLE_APE)) {
10011 /* Allow reads and writes to the
10012 * APE register and memory space.
10014 val = tr32(TG3PCI_PCISTATE);
10015 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10016 PCISTATE_ALLOW_APE_SHMEM_WR |
10017 PCISTATE_ALLOW_APE_PSPACE_WR;
10018 tw32(TG3PCI_PCISTATE, val);
10021 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10022 /* Enable some hw fixes. */
10023 val = tr32(TG3PCI_MSI_DATA);
10024 val |= (1 << 26) | (1 << 28) | (1 << 29);
10025 tw32(TG3PCI_MSI_DATA, val);
10028 /* Descriptor ring init may make accesses to the
10029 * NIC SRAM area to setup the TX descriptors, so we
10030 * can only do this after the hardware has been
10031 * successfully reset.
10033 err = tg3_init_rings(tp);
10037 if (tg3_flag(tp, 57765_PLUS)) {
10038 val = tr32(TG3PCI_DMA_RW_CTRL) &
10039 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10040 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10041 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10042 if (!tg3_flag(tp, 57765_CLASS) &&
10043 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10044 tg3_asic_rev(tp) != ASIC_REV_5762)
10045 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10046 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10047 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10048 tg3_asic_rev(tp) != ASIC_REV_5761) {
10049 /* This value is determined during the probe time DMA
10050 * engine test, tg3_test_dma.
10052 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10055 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10056 GRC_MODE_4X_NIC_SEND_RINGS |
10057 GRC_MODE_NO_TX_PHDR_CSUM |
10058 GRC_MODE_NO_RX_PHDR_CSUM);
10059 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10061 /* Pseudo-header checksum is done by hardware logic and not
10062 * the offload processers, so make the chip do the pseudo-
10063 * header checksums on receive. For transmit it is more
10064 * convenient to do the pseudo-header checksum in software
10065 * as Linux does that on transmit for us in all cases.
10067 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10069 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10071 tw32(TG3_RX_PTP_CTL,
10072 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10074 if (tg3_flag(tp, PTP_CAPABLE))
10075 val |= GRC_MODE_TIME_SYNC_ENABLE;
10077 tw32(GRC_MODE, tp->grc_mode | val);
10079 /* On one of the AMD platform, MRRS is restricted to 4000 because of
10080 * south bridge limitation. As a workaround, Driver is setting MRRS
10081 * to 2048 instead of default 4096.
10083 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10084 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10085 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10086 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10089 /* Setup the timer prescalar register. Clock is always 66Mhz. */
10090 val = tr32(GRC_MISC_CFG);
10092 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10093 tw32(GRC_MISC_CFG, val);
10095 /* Initialize MBUF/DESC pool. */
10096 if (tg3_flag(tp, 5750_PLUS)) {
10098 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10099 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10100 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10101 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10103 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10104 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10105 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10106 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10109 fw_len = tp->fw_len;
10110 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10111 tw32(BUFMGR_MB_POOL_ADDR,
10112 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10113 tw32(BUFMGR_MB_POOL_SIZE,
10114 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10117 if (tp->dev->mtu <= ETH_DATA_LEN) {
10118 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10119 tp->bufmgr_config.mbuf_read_dma_low_water);
10120 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10121 tp->bufmgr_config.mbuf_mac_rx_low_water);
10122 tw32(BUFMGR_MB_HIGH_WATER,
10123 tp->bufmgr_config.mbuf_high_water);
10125 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10126 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10127 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10128 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10129 tw32(BUFMGR_MB_HIGH_WATER,
10130 tp->bufmgr_config.mbuf_high_water_jumbo);
10132 tw32(BUFMGR_DMA_LOW_WATER,
10133 tp->bufmgr_config.dma_low_water);
10134 tw32(BUFMGR_DMA_HIGH_WATER,
10135 tp->bufmgr_config.dma_high_water);
10137 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10138 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10139 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10140 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10141 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10142 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10143 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10144 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10145 tw32(BUFMGR_MODE, val);
10146 for (i = 0; i < 2000; i++) {
10147 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10152 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10156 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10157 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10159 tg3_setup_rxbd_thresholds(tp);
10161 /* Initialize TG3_BDINFO's at:
10162 * RCVDBDI_STD_BD: standard eth size rx ring
10163 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10164 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10167 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10168 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10169 * ring attribute flags
10170 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10172 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10173 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10175 * The size of each ring is fixed in the firmware, but the location is
10178 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10179 ((u64) tpr->rx_std_mapping >> 32));
10180 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10181 ((u64) tpr->rx_std_mapping & 0xffffffff));
10182 if (!tg3_flag(tp, 5717_PLUS))
10183 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10184 NIC_SRAM_RX_BUFFER_DESC);
10186 /* Disable the mini ring */
10187 if (!tg3_flag(tp, 5705_PLUS))
10188 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10189 BDINFO_FLAGS_DISABLED);
10191 /* Program the jumbo buffer descriptor ring control
10192 * blocks on those devices that have them.
10194 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10195 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10197 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10198 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10199 ((u64) tpr->rx_jmb_mapping >> 32));
10200 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10201 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10202 val = TG3_RX_JMB_RING_SIZE(tp) <<
10203 BDINFO_FLAGS_MAXLEN_SHIFT;
10204 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10205 val | BDINFO_FLAGS_USE_EXT_RECV);
10206 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10207 tg3_flag(tp, 57765_CLASS) ||
10208 tg3_asic_rev(tp) == ASIC_REV_5762)
10209 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10210 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10212 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10213 BDINFO_FLAGS_DISABLED);
10216 if (tg3_flag(tp, 57765_PLUS)) {
10217 val = TG3_RX_STD_RING_SIZE(tp);
10218 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10219 val |= (TG3_RX_STD_DMA_SZ << 2);
10221 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10223 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10225 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10227 tpr->rx_std_prod_idx = tp->rx_pending;
10228 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10230 tpr->rx_jmb_prod_idx =
10231 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10232 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10234 tg3_rings_reset(tp);
10236 /* Initialize MAC address and backoff seed. */
10237 __tg3_set_mac_addr(tp, false);
10239 /* MTU + ethernet header + FCS + optional VLAN tag */
10240 tw32(MAC_RX_MTU_SIZE,
10241 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10243 /* The slot time is changed by tg3_setup_phy if we
10244 * run at gigabit with half duplex.
10246 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10247 (6 << TX_LENGTHS_IPG_SHIFT) |
10248 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10250 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10251 tg3_asic_rev(tp) == ASIC_REV_5762)
10252 val |= tr32(MAC_TX_LENGTHS) &
10253 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10254 TX_LENGTHS_CNT_DWN_VAL_MSK);
10256 tw32(MAC_TX_LENGTHS, val);
10258 /* Receive rules. */
10259 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10260 tw32(RCVLPC_CONFIG, 0x0181);
10262 /* Calculate RDMAC_MODE setting early, we need it to determine
10263 * the RCVLPC_STATE_ENABLE mask.
10265 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10266 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10267 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10268 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10269 RDMAC_MODE_LNGREAD_ENAB);
10271 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10272 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10274 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10275 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10276 tg3_asic_rev(tp) == ASIC_REV_57780)
10277 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10278 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10279 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10281 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10282 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10283 if (tg3_flag(tp, TSO_CAPABLE) &&
10284 tg3_asic_rev(tp) == ASIC_REV_5705) {
10285 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10286 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10287 !tg3_flag(tp, IS_5788)) {
10288 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10292 if (tg3_flag(tp, PCI_EXPRESS))
10293 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10295 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10297 if (tp->dev->mtu <= ETH_DATA_LEN) {
10298 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10299 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10303 if (tg3_flag(tp, HW_TSO_1) ||
10304 tg3_flag(tp, HW_TSO_2) ||
10305 tg3_flag(tp, HW_TSO_3))
10306 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10308 if (tg3_flag(tp, 57765_PLUS) ||
10309 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10310 tg3_asic_rev(tp) == ASIC_REV_57780)
10311 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10313 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10314 tg3_asic_rev(tp) == ASIC_REV_5762)
10315 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10317 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10318 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10319 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10320 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10321 tg3_flag(tp, 57765_PLUS)) {
10324 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10325 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10327 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10329 val = tr32(tgtreg);
10330 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10331 tg3_asic_rev(tp) == ASIC_REV_5762) {
10332 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10333 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10334 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10335 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10336 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10337 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10339 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10342 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10343 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10344 tg3_asic_rev(tp) == ASIC_REV_5762) {
10347 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10348 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10350 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10352 val = tr32(tgtreg);
10354 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10355 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10358 /* Receive/send statistics. */
10359 if (tg3_flag(tp, 5750_PLUS)) {
10360 val = tr32(RCVLPC_STATS_ENABLE);
10361 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10362 tw32(RCVLPC_STATS_ENABLE, val);
10363 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10364 tg3_flag(tp, TSO_CAPABLE)) {
10365 val = tr32(RCVLPC_STATS_ENABLE);
10366 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10367 tw32(RCVLPC_STATS_ENABLE, val);
10369 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10371 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10372 tw32(SNDDATAI_STATSENAB, 0xffffff);
10373 tw32(SNDDATAI_STATSCTRL,
10374 (SNDDATAI_SCTRL_ENABLE |
10375 SNDDATAI_SCTRL_FASTUPD));
10377 /* Setup host coalescing engine. */
10378 tw32(HOSTCC_MODE, 0);
10379 for (i = 0; i < 2000; i++) {
10380 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10385 __tg3_set_coalesce(tp, &tp->coal);
10387 if (!tg3_flag(tp, 5705_PLUS)) {
10388 /* Status/statistics block address. See tg3_timer,
10389 * the tg3_periodic_fetch_stats call there, and
10390 * tg3_get_stats to see how this works for 5705/5750 chips.
10392 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10393 ((u64) tp->stats_mapping >> 32));
10394 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10395 ((u64) tp->stats_mapping & 0xffffffff));
10396 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10398 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10400 /* Clear statistics and status block memory areas */
10401 for (i = NIC_SRAM_STATS_BLK;
10402 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10403 i += sizeof(u32)) {
10404 tg3_write_mem(tp, i, 0);
10409 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10411 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10412 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10413 if (!tg3_flag(tp, 5705_PLUS))
10414 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10416 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10417 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10418 /* reset to prevent losing 1st rx packet intermittently */
10419 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10423 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10424 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10425 MAC_MODE_FHDE_ENABLE;
10426 if (tg3_flag(tp, ENABLE_APE))
10427 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10428 if (!tg3_flag(tp, 5705_PLUS) &&
10429 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10430 tg3_asic_rev(tp) != ASIC_REV_5700)
10431 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10432 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10435 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10436 * If TG3_FLAG_IS_NIC is zero, we should read the
10437 * register to preserve the GPIO settings for LOMs. The GPIOs,
10438 * whether used as inputs or outputs, are set by boot code after
10441 if (!tg3_flag(tp, IS_NIC)) {
10444 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10445 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10446 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10448 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10449 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10450 GRC_LCLCTRL_GPIO_OUTPUT3;
10452 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10453 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10455 tp->grc_local_ctrl &= ~gpio_mask;
10456 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10458 /* GPIO1 must be driven high for eeprom write protect */
10459 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10460 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10461 GRC_LCLCTRL_GPIO_OUTPUT1);
10463 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10466 if (tg3_flag(tp, USING_MSIX)) {
10467 val = tr32(MSGINT_MODE);
10468 val |= MSGINT_MODE_ENABLE;
10469 if (tp->irq_cnt > 1)
10470 val |= MSGINT_MODE_MULTIVEC_EN;
10471 if (!tg3_flag(tp, 1SHOT_MSI))
10472 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10473 tw32(MSGINT_MODE, val);
10476 if (!tg3_flag(tp, 5705_PLUS)) {
10477 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10481 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10482 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10483 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10484 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10485 WDMAC_MODE_LNGREAD_ENAB);
10487 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10488 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10489 if (tg3_flag(tp, TSO_CAPABLE) &&
10490 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10491 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10493 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10494 !tg3_flag(tp, IS_5788)) {
10495 val |= WDMAC_MODE_RX_ACCEL;
10499 /* Enable host coalescing bug fix */
10500 if (tg3_flag(tp, 5755_PLUS))
10501 val |= WDMAC_MODE_STATUS_TAG_FIX;
10503 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10504 val |= WDMAC_MODE_BURST_ALL_DATA;
10506 tw32_f(WDMAC_MODE, val);
10509 if (tg3_flag(tp, PCIX_MODE)) {
10512 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10514 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10515 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10516 pcix_cmd |= PCI_X_CMD_READ_2K;
10517 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10518 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10519 pcix_cmd |= PCI_X_CMD_READ_2K;
10521 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10525 tw32_f(RDMAC_MODE, rdmac_mode);
10528 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10529 tg3_asic_rev(tp) == ASIC_REV_5720) {
10530 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10531 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10534 if (i < TG3_NUM_RDMA_CHANNELS) {
10535 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10536 val |= tg3_lso_rd_dma_workaround_bit(tp);
10537 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10538 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10542 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10543 if (!tg3_flag(tp, 5705_PLUS))
10544 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10546 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10547 tw32(SNDDATAC_MODE,
10548 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10550 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10552 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10553 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10554 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10555 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10556 val |= RCVDBDI_MODE_LRG_RING_SZ;
10557 tw32(RCVDBDI_MODE, val);
10558 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10559 if (tg3_flag(tp, HW_TSO_1) ||
10560 tg3_flag(tp, HW_TSO_2) ||
10561 tg3_flag(tp, HW_TSO_3))
10562 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10563 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10564 if (tg3_flag(tp, ENABLE_TSS))
10565 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10566 tw32(SNDBDI_MODE, val);
10567 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10569 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10570 err = tg3_load_5701_a0_firmware_fix(tp);
10575 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10576 /* Ignore any errors for the firmware download. If download
10577 * fails, the device will operate with EEE disabled
10579 tg3_load_57766_firmware(tp);
10582 if (tg3_flag(tp, TSO_CAPABLE)) {
10583 err = tg3_load_tso_firmware(tp);
10588 tp->tx_mode = TX_MODE_ENABLE;
10590 if (tg3_flag(tp, 5755_PLUS) ||
10591 tg3_asic_rev(tp) == ASIC_REV_5906)
10592 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10594 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10595 tg3_asic_rev(tp) == ASIC_REV_5762) {
10596 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10597 tp->tx_mode &= ~val;
10598 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10601 tw32_f(MAC_TX_MODE, tp->tx_mode);
10604 if (tg3_flag(tp, ENABLE_RSS)) {
10607 tg3_rss_write_indir_tbl(tp);
10609 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10611 for (i = 0; i < 10 ; i++)
10612 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10615 tp->rx_mode = RX_MODE_ENABLE;
10616 if (tg3_flag(tp, 5755_PLUS))
10617 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10619 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10620 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10622 if (tg3_flag(tp, ENABLE_RSS))
10623 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10624 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10625 RX_MODE_RSS_IPV6_HASH_EN |
10626 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10627 RX_MODE_RSS_IPV4_HASH_EN |
10628 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10630 tw32_f(MAC_RX_MODE, tp->rx_mode);
10633 tw32(MAC_LED_CTRL, tp->led_ctrl);
10635 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10636 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10637 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10640 tw32_f(MAC_RX_MODE, tp->rx_mode);
10643 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10644 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10645 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10646 /* Set drive transmission level to 1.2V */
10647 /* only if the signal pre-emphasis bit is not set */
10648 val = tr32(MAC_SERDES_CFG);
10651 tw32(MAC_SERDES_CFG, val);
10653 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10654 tw32(MAC_SERDES_CFG, 0x616000);
10657 /* Prevent chip from dropping frames when flow control
10660 if (tg3_flag(tp, 57765_CLASS))
10664 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10666 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10667 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10668 /* Use hardware link auto-negotiation */
10669 tg3_flag_set(tp, HW_AUTONEG);
10672 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10673 tg3_asic_rev(tp) == ASIC_REV_5714) {
10676 tmp = tr32(SERDES_RX_CTRL);
10677 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10678 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10679 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10680 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10683 if (!tg3_flag(tp, USE_PHYLIB)) {
10684 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10685 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10687 err = tg3_setup_phy(tp, false);
10691 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10692 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10695 /* Clear CRC stats. */
10696 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10697 tg3_writephy(tp, MII_TG3_TEST1,
10698 tmp | MII_TG3_TEST1_CRC_EN);
10699 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10704 __tg3_set_rx_mode(tp->dev);
10706 /* Initialize receive rules. */
10707 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10708 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10709 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10710 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10712 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10716 if (tg3_flag(tp, ENABLE_ASF))
10720 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10723 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10726 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10729 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10732 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10735 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10738 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10741 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10744 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10747 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10750 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10753 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10756 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10758 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10766 if (tg3_flag(tp, ENABLE_APE))
10767 /* Write our heartbeat update interval to APE. */
10768 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10769 APE_HOST_HEARTBEAT_INT_5SEC);
10771 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10776 /* Called at device open time to get the chip ready for
10777 * packet processing. Invoked with tp->lock held.
10779 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10781 /* Chip may have been just powered on. If so, the boot code may still
10782 * be running initialization. Wait for it to finish to avoid races in
10783 * accessing the hardware.
10785 tg3_enable_register_access(tp);
10788 tg3_switch_clocks(tp);
10790 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10792 return tg3_reset_hw(tp, reset_phy);
10795 #ifdef CONFIG_TIGON3_HWMON
10796 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10800 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10801 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10803 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10806 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10807 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10808 memset(ocir, 0, TG3_OCIR_LEN);
10812 /* sysfs attributes for hwmon */
10813 static ssize_t tg3_show_temp(struct device *dev,
10814 struct device_attribute *devattr, char *buf)
10816 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10817 struct tg3 *tp = dev_get_drvdata(dev);
10820 spin_lock_bh(&tp->lock);
10821 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10822 sizeof(temperature));
10823 spin_unlock_bh(&tp->lock);
10824 return sprintf(buf, "%u\n", temperature * 1000);
10828 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10829 TG3_TEMP_SENSOR_OFFSET);
10830 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10831 TG3_TEMP_CAUTION_OFFSET);
10832 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10833 TG3_TEMP_MAX_OFFSET);
10835 static struct attribute *tg3_attrs[] = {
10836 &sensor_dev_attr_temp1_input.dev_attr.attr,
10837 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10838 &sensor_dev_attr_temp1_max.dev_attr.attr,
10841 ATTRIBUTE_GROUPS(tg3);
10843 static void tg3_hwmon_close(struct tg3 *tp)
10845 if (tp->hwmon_dev) {
10846 hwmon_device_unregister(tp->hwmon_dev);
10847 tp->hwmon_dev = NULL;
10851 static void tg3_hwmon_open(struct tg3 *tp)
10855 struct pci_dev *pdev = tp->pdev;
10856 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10858 tg3_sd_scan_scratchpad(tp, ocirs);
10860 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10861 if (!ocirs[i].src_data_length)
10864 size += ocirs[i].src_hdr_length;
10865 size += ocirs[i].src_data_length;
10871 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10873 if (IS_ERR(tp->hwmon_dev)) {
10874 tp->hwmon_dev = NULL;
10875 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10879 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10880 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10881 #endif /* CONFIG_TIGON3_HWMON */
10884 #define TG3_STAT_ADD32(PSTAT, REG) \
10885 do { u32 __val = tr32(REG); \
10886 (PSTAT)->low += __val; \
10887 if ((PSTAT)->low < __val) \
10888 (PSTAT)->high += 1; \
10891 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10893 struct tg3_hw_stats *sp = tp->hw_stats;
10898 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10899 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10900 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10901 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10902 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10903 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10904 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10905 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10906 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10907 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10908 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10909 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10910 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10911 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10912 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10913 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10916 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10917 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10918 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10919 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10922 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10923 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10924 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10925 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10926 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10927 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10928 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10929 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10930 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10931 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10932 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10933 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10934 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10935 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10937 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10938 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10939 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10940 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10941 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10942 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10944 u32 val = tr32(HOSTCC_FLOW_ATTN);
10945 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10947 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10948 sp->rx_discards.low += val;
10949 if (sp->rx_discards.low < val)
10950 sp->rx_discards.high += 1;
10952 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10954 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10957 static void tg3_chk_missed_msi(struct tg3 *tp)
10961 for (i = 0; i < tp->irq_cnt; i++) {
10962 struct tg3_napi *tnapi = &tp->napi[i];
10964 if (tg3_has_work(tnapi)) {
10965 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10966 tnapi->last_tx_cons == tnapi->tx_cons) {
10967 if (tnapi->chk_msi_cnt < 1) {
10968 tnapi->chk_msi_cnt++;
10974 tnapi->chk_msi_cnt = 0;
10975 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10976 tnapi->last_tx_cons = tnapi->tx_cons;
10980 static void tg3_timer(struct timer_list *t)
10982 struct tg3 *tp = from_timer(tp, t, timer);
10984 spin_lock(&tp->lock);
10986 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10987 spin_unlock(&tp->lock);
10988 goto restart_timer;
10991 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10992 tg3_flag(tp, 57765_CLASS))
10993 tg3_chk_missed_msi(tp);
10995 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10996 /* BCM4785: Flush posted writes from GbE to host memory. */
11000 if (!tg3_flag(tp, TAGGED_STATUS)) {
11001 /* All of this garbage is because when using non-tagged
11002 * IRQ status the mailbox/status_block protocol the chip
11003 * uses with the cpu is race prone.
11005 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11006 tw32(GRC_LOCAL_CTRL,
11007 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11009 tw32(HOSTCC_MODE, tp->coalesce_mode |
11010 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11013 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11014 spin_unlock(&tp->lock);
11015 tg3_reset_task_schedule(tp);
11016 goto restart_timer;
11020 /* This part only runs once per second. */
11021 if (!--tp->timer_counter) {
11022 if (tg3_flag(tp, 5705_PLUS))
11023 tg3_periodic_fetch_stats(tp);
11025 if (tp->setlpicnt && !--tp->setlpicnt)
11026 tg3_phy_eee_enable(tp);
11028 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11032 mac_stat = tr32(MAC_STATUS);
11035 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11036 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11038 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11042 tg3_setup_phy(tp, false);
11043 } else if (tg3_flag(tp, POLL_SERDES)) {
11044 u32 mac_stat = tr32(MAC_STATUS);
11045 int need_setup = 0;
11048 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11051 if (!tp->link_up &&
11052 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11053 MAC_STATUS_SIGNAL_DET))) {
11057 if (!tp->serdes_counter) {
11060 ~MAC_MODE_PORT_MODE_MASK));
11062 tw32_f(MAC_MODE, tp->mac_mode);
11065 tg3_setup_phy(tp, false);
11067 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11068 tg3_flag(tp, 5780_CLASS)) {
11069 tg3_serdes_parallel_detect(tp);
11070 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11071 u32 cpmu = tr32(TG3_CPMU_STATUS);
11072 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11073 TG3_CPMU_STATUS_LINK_MASK);
11075 if (link_up != tp->link_up)
11076 tg3_setup_phy(tp, false);
11079 tp->timer_counter = tp->timer_multiplier;
11082 /* Heartbeat is only sent once every 2 seconds.
11084 * The heartbeat is to tell the ASF firmware that the host
11085 * driver is still alive. In the event that the OS crashes,
11086 * ASF needs to reset the hardware to free up the FIFO space
11087 * that may be filled with rx packets destined for the host.
11088 * If the FIFO is full, ASF will no longer function properly.
11090 * Unintended resets have been reported on real time kernels
11091 * where the timer doesn't run on time. Netpoll will also have
11094 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11095 * to check the ring condition when the heartbeat is expiring
11096 * before doing the reset. This will prevent most unintended
11099 if (!--tp->asf_counter) {
11100 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11101 tg3_wait_for_event_ack(tp);
11103 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11104 FWCMD_NICDRV_ALIVE3);
11105 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11106 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11107 TG3_FW_UPDATE_TIMEOUT_SEC);
11109 tg3_generate_fw_event(tp);
11111 tp->asf_counter = tp->asf_multiplier;
11114 /* Update the APE heartbeat every 5 seconds.*/
11115 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11117 spin_unlock(&tp->lock);
11120 tp->timer.expires = jiffies + tp->timer_offset;
11121 add_timer(&tp->timer);
11124 static void tg3_timer_init(struct tg3 *tp)
11126 if (tg3_flag(tp, TAGGED_STATUS) &&
11127 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11128 !tg3_flag(tp, 57765_CLASS))
11129 tp->timer_offset = HZ;
11131 tp->timer_offset = HZ / 10;
11133 BUG_ON(tp->timer_offset > HZ);
11135 tp->timer_multiplier = (HZ / tp->timer_offset);
11136 tp->asf_multiplier = (HZ / tp->timer_offset) *
11137 TG3_FW_UPDATE_FREQ_SEC;
11139 timer_setup(&tp->timer, tg3_timer, 0);
11142 static void tg3_timer_start(struct tg3 *tp)
11144 tp->asf_counter = tp->asf_multiplier;
11145 tp->timer_counter = tp->timer_multiplier;
11147 tp->timer.expires = jiffies + tp->timer_offset;
11148 add_timer(&tp->timer);
11151 static void tg3_timer_stop(struct tg3 *tp)
11153 del_timer_sync(&tp->timer);
11156 /* Restart hardware after configuration changes, self-test, etc.
11157 * Invoked with tp->lock held.
11159 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11160 __releases(tp->lock)
11161 __acquires(tp->lock)
11165 err = tg3_init_hw(tp, reset_phy);
11167 netdev_err(tp->dev,
11168 "Failed to re-initialize device, aborting\n");
11169 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11170 tg3_full_unlock(tp);
11171 tg3_timer_stop(tp);
11173 tg3_napi_enable(tp);
11174 dev_close(tp->dev);
11175 tg3_full_lock(tp, 0);
11180 static void tg3_reset_task(struct work_struct *work)
11182 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11186 tg3_full_lock(tp, 0);
11188 if (!netif_running(tp->dev)) {
11189 tg3_flag_clear(tp, RESET_TASK_PENDING);
11190 tg3_full_unlock(tp);
11195 tg3_full_unlock(tp);
11199 tg3_netif_stop(tp);
11201 tg3_full_lock(tp, 1);
11203 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11204 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11205 tp->write32_rx_mbox = tg3_write_flush_reg32;
11206 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11207 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11210 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11211 err = tg3_init_hw(tp, true);
11215 tg3_netif_start(tp);
11218 tg3_full_unlock(tp);
11223 tg3_flag_clear(tp, RESET_TASK_PENDING);
11227 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11230 unsigned long flags;
11232 struct tg3_napi *tnapi = &tp->napi[irq_num];
11234 if (tp->irq_cnt == 1)
11235 name = tp->dev->name;
11237 name = &tnapi->irq_lbl[0];
11238 if (tnapi->tx_buffers && tnapi->rx_rcb)
11239 snprintf(name, IFNAMSIZ,
11240 "%s-txrx-%d", tp->dev->name, irq_num);
11241 else if (tnapi->tx_buffers)
11242 snprintf(name, IFNAMSIZ,
11243 "%s-tx-%d", tp->dev->name, irq_num);
11244 else if (tnapi->rx_rcb)
11245 snprintf(name, IFNAMSIZ,
11246 "%s-rx-%d", tp->dev->name, irq_num);
11248 snprintf(name, IFNAMSIZ,
11249 "%s-%d", tp->dev->name, irq_num);
11250 name[IFNAMSIZ-1] = 0;
11253 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11255 if (tg3_flag(tp, 1SHOT_MSI))
11256 fn = tg3_msi_1shot;
11259 fn = tg3_interrupt;
11260 if (tg3_flag(tp, TAGGED_STATUS))
11261 fn = tg3_interrupt_tagged;
11262 flags = IRQF_SHARED;
11265 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11268 static int tg3_test_interrupt(struct tg3 *tp)
11270 struct tg3_napi *tnapi = &tp->napi[0];
11271 struct net_device *dev = tp->dev;
11272 int err, i, intr_ok = 0;
11275 if (!netif_running(dev))
11278 tg3_disable_ints(tp);
11280 free_irq(tnapi->irq_vec, tnapi);
11283 * Turn off MSI one shot mode. Otherwise this test has no
11284 * observable way to know whether the interrupt was delivered.
11286 if (tg3_flag(tp, 57765_PLUS)) {
11287 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11288 tw32(MSGINT_MODE, val);
11291 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11292 IRQF_SHARED, dev->name, tnapi);
11296 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11297 tg3_enable_ints(tp);
11299 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11302 for (i = 0; i < 5; i++) {
11303 u32 int_mbox, misc_host_ctrl;
11305 int_mbox = tr32_mailbox(tnapi->int_mbox);
11306 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11308 if ((int_mbox != 0) ||
11309 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11314 if (tg3_flag(tp, 57765_PLUS) &&
11315 tnapi->hw_status->status_tag != tnapi->last_tag)
11316 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11321 tg3_disable_ints(tp);
11323 free_irq(tnapi->irq_vec, tnapi);
11325 err = tg3_request_irq(tp, 0);
11331 /* Reenable MSI one shot mode. */
11332 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11333 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11334 tw32(MSGINT_MODE, val);
11342 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11343 * successfully restored
11345 static int tg3_test_msi(struct tg3 *tp)
11350 if (!tg3_flag(tp, USING_MSI))
11353 /* Turn off SERR reporting in case MSI terminates with Master
11356 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11357 pci_write_config_word(tp->pdev, PCI_COMMAND,
11358 pci_cmd & ~PCI_COMMAND_SERR);
11360 err = tg3_test_interrupt(tp);
11362 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11367 /* other failures */
11371 /* MSI test failed, go back to INTx mode */
11372 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11373 "to INTx mode. Please report this failure to the PCI "
11374 "maintainer and include system chipset information\n");
11376 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11378 pci_disable_msi(tp->pdev);
11380 tg3_flag_clear(tp, USING_MSI);
11381 tp->napi[0].irq_vec = tp->pdev->irq;
11383 err = tg3_request_irq(tp, 0);
11387 /* Need to reset the chip because the MSI cycle may have terminated
11388 * with Master Abort.
11390 tg3_full_lock(tp, 1);
11392 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11393 err = tg3_init_hw(tp, true);
11395 tg3_full_unlock(tp);
11398 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11403 static int tg3_request_firmware(struct tg3 *tp)
11405 const struct tg3_firmware_hdr *fw_hdr;
11407 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11408 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11413 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11415 /* Firmware blob starts with version numbers, followed by
11416 * start address and _full_ length including BSS sections
11417 * (which must be longer than the actual data, of course
11420 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11421 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11422 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11423 tp->fw_len, tp->fw_needed);
11424 release_firmware(tp->fw);
11429 /* We no longer need firmware; we have it. */
11430 tp->fw_needed = NULL;
11434 static u32 tg3_irq_count(struct tg3 *tp)
11436 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11439 /* We want as many rx rings enabled as there are cpus.
11440 * In multiqueue MSI-X mode, the first MSI-X vector
11441 * only deals with link interrupts, etc, so we add
11442 * one to the number of vectors we are requesting.
11444 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11450 static bool tg3_enable_msix(struct tg3 *tp)
11453 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11455 tp->txq_cnt = tp->txq_req;
11456 tp->rxq_cnt = tp->rxq_req;
11458 tp->rxq_cnt = netif_get_num_default_rss_queues();
11459 if (tp->rxq_cnt > tp->rxq_max)
11460 tp->rxq_cnt = tp->rxq_max;
11462 /* Disable multiple TX rings by default. Simple round-robin hardware
11463 * scheduling of the TX rings can cause starvation of rings with
11464 * small packets when other rings have TSO or jumbo packets.
11469 tp->irq_cnt = tg3_irq_count(tp);
11471 for (i = 0; i < tp->irq_max; i++) {
11472 msix_ent[i].entry = i;
11473 msix_ent[i].vector = 0;
11476 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11479 } else if (rc < tp->irq_cnt) {
11480 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11483 tp->rxq_cnt = max(rc - 1, 1);
11485 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11488 for (i = 0; i < tp->irq_max; i++)
11489 tp->napi[i].irq_vec = msix_ent[i].vector;
11491 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11492 pci_disable_msix(tp->pdev);
11496 if (tp->irq_cnt == 1)
11499 tg3_flag_set(tp, ENABLE_RSS);
11501 if (tp->txq_cnt > 1)
11502 tg3_flag_set(tp, ENABLE_TSS);
11504 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11509 static void tg3_ints_init(struct tg3 *tp)
11511 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11512 !tg3_flag(tp, TAGGED_STATUS)) {
11513 /* All MSI supporting chips should support tagged
11514 * status. Assert that this is the case.
11516 netdev_warn(tp->dev,
11517 "MSI without TAGGED_STATUS? Not using MSI\n");
11521 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11522 tg3_flag_set(tp, USING_MSIX);
11523 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11524 tg3_flag_set(tp, USING_MSI);
11526 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11527 u32 msi_mode = tr32(MSGINT_MODE);
11528 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11529 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11530 if (!tg3_flag(tp, 1SHOT_MSI))
11531 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11532 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11535 if (!tg3_flag(tp, USING_MSIX)) {
11537 tp->napi[0].irq_vec = tp->pdev->irq;
11540 if (tp->irq_cnt == 1) {
11543 netif_set_real_num_tx_queues(tp->dev, 1);
11544 netif_set_real_num_rx_queues(tp->dev, 1);
11548 static void tg3_ints_fini(struct tg3 *tp)
11550 if (tg3_flag(tp, USING_MSIX))
11551 pci_disable_msix(tp->pdev);
11552 else if (tg3_flag(tp, USING_MSI))
11553 pci_disable_msi(tp->pdev);
11554 tg3_flag_clear(tp, USING_MSI);
11555 tg3_flag_clear(tp, USING_MSIX);
11556 tg3_flag_clear(tp, ENABLE_RSS);
11557 tg3_flag_clear(tp, ENABLE_TSS);
11560 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11563 struct net_device *dev = tp->dev;
11567 * Setup interrupts first so we know how
11568 * many NAPI resources to allocate
11572 tg3_rss_check_indir_tbl(tp);
11574 /* The placement of this call is tied
11575 * to the setup and use of Host TX descriptors.
11577 err = tg3_alloc_consistent(tp);
11579 goto out_ints_fini;
11583 tg3_napi_enable(tp);
11585 for (i = 0; i < tp->irq_cnt; i++) {
11586 err = tg3_request_irq(tp, i);
11588 for (i--; i >= 0; i--) {
11589 struct tg3_napi *tnapi = &tp->napi[i];
11591 free_irq(tnapi->irq_vec, tnapi);
11593 goto out_napi_fini;
11597 tg3_full_lock(tp, 0);
11600 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11602 err = tg3_init_hw(tp, reset_phy);
11604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11605 tg3_free_rings(tp);
11608 tg3_full_unlock(tp);
11613 if (test_irq && tg3_flag(tp, USING_MSI)) {
11614 err = tg3_test_msi(tp);
11617 tg3_full_lock(tp, 0);
11618 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11619 tg3_free_rings(tp);
11620 tg3_full_unlock(tp);
11622 goto out_napi_fini;
11625 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11626 u32 val = tr32(PCIE_TRANSACTION_CFG);
11628 tw32(PCIE_TRANSACTION_CFG,
11629 val | PCIE_TRANS_CFG_1SHOT_MSI);
11635 tg3_hwmon_open(tp);
11637 tg3_full_lock(tp, 0);
11639 tg3_timer_start(tp);
11640 tg3_flag_set(tp, INIT_COMPLETE);
11641 tg3_enable_ints(tp);
11643 tg3_ptp_resume(tp);
11645 tg3_full_unlock(tp);
11647 netif_tx_start_all_queues(dev);
11650 * Reset loopback feature if it was turned on while the device was down
11651 * make sure that it's installed properly now.
11653 if (dev->features & NETIF_F_LOOPBACK)
11654 tg3_set_loopback(dev, dev->features);
11659 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11660 struct tg3_napi *tnapi = &tp->napi[i];
11661 free_irq(tnapi->irq_vec, tnapi);
11665 tg3_napi_disable(tp);
11667 tg3_free_consistent(tp);
11675 static void tg3_stop(struct tg3 *tp)
11679 tg3_reset_task_cancel(tp);
11680 tg3_netif_stop(tp);
11682 tg3_timer_stop(tp);
11684 tg3_hwmon_close(tp);
11688 tg3_full_lock(tp, 1);
11690 tg3_disable_ints(tp);
11692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11693 tg3_free_rings(tp);
11694 tg3_flag_clear(tp, INIT_COMPLETE);
11696 tg3_full_unlock(tp);
11698 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11699 struct tg3_napi *tnapi = &tp->napi[i];
11700 free_irq(tnapi->irq_vec, tnapi);
11707 tg3_free_consistent(tp);
11710 static int tg3_open(struct net_device *dev)
11712 struct tg3 *tp = netdev_priv(dev);
11715 if (tp->pcierr_recovery) {
11716 netdev_err(dev, "Failed to open device. PCI error recovery "
11721 if (tp->fw_needed) {
11722 err = tg3_request_firmware(tp);
11723 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11725 netdev_warn(tp->dev, "EEE capability disabled\n");
11726 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11727 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11728 netdev_warn(tp->dev, "EEE capability restored\n");
11729 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11731 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11735 netdev_warn(tp->dev, "TSO capability disabled\n");
11736 tg3_flag_clear(tp, TSO_CAPABLE);
11737 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11738 netdev_notice(tp->dev, "TSO capability restored\n");
11739 tg3_flag_set(tp, TSO_CAPABLE);
11743 tg3_carrier_off(tp);
11745 err = tg3_power_up(tp);
11749 tg3_full_lock(tp, 0);
11751 tg3_disable_ints(tp);
11752 tg3_flag_clear(tp, INIT_COMPLETE);
11754 tg3_full_unlock(tp);
11756 err = tg3_start(tp,
11757 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11760 tg3_frob_aux_power(tp, false);
11761 pci_set_power_state(tp->pdev, PCI_D3hot);
11767 static int tg3_close(struct net_device *dev)
11769 struct tg3 *tp = netdev_priv(dev);
11771 if (tp->pcierr_recovery) {
11772 netdev_err(dev, "Failed to close device. PCI error recovery in progress\n");
11779 if (pci_device_is_present(tp->pdev)) {
11780 tg3_power_down_prepare(tp);
11782 tg3_carrier_off(tp);
11787 static inline u64 get_stat64(tg3_stat64_t *val)
11789 return ((u64)val->high << 32) | ((u64)val->low);
11792 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11794 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11796 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11797 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11798 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11801 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11802 tg3_writephy(tp, MII_TG3_TEST1,
11803 val | MII_TG3_TEST1_CRC_EN);
11804 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11808 tp->phy_crc_errors += val;
11810 return tp->phy_crc_errors;
11813 return get_stat64(&hw_stats->rx_fcs_errors);
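/* Hardware counters are kept as {high,low} 32-bit pairs, which
 * get_stat64() above folds into one u64 (high=0x1, low=0x2 reads back
 * as 0x100000002).  On 5700/5701 copper parts the FCS error count
 * instead comes from a PHY test register and is accumulated in
 * tp->phy_crc_errors across reads.
 */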
11816 #define ESTAT_ADD(member) \
11817 estats->member = old_estats->member + \
11818 get_stat64(&hw_stats->member)
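/* ESTAT_ADD() adds the live hardware counter onto the snapshot saved
 * across the last chip reset; for example ESTAT_ADD(rx_octets)
 * expands to:
 *
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 */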
11820 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11822 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11823 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11825 ESTAT_ADD(rx_octets);
11826 ESTAT_ADD(rx_fragments);
11827 ESTAT_ADD(rx_ucast_packets);
11828 ESTAT_ADD(rx_mcast_packets);
11829 ESTAT_ADD(rx_bcast_packets);
11830 ESTAT_ADD(rx_fcs_errors);
11831 ESTAT_ADD(rx_align_errors);
11832 ESTAT_ADD(rx_xon_pause_rcvd);
11833 ESTAT_ADD(rx_xoff_pause_rcvd);
11834 ESTAT_ADD(rx_mac_ctrl_rcvd);
11835 ESTAT_ADD(rx_xoff_entered);
11836 ESTAT_ADD(rx_frame_too_long_errors);
11837 ESTAT_ADD(rx_jabbers);
11838 ESTAT_ADD(rx_undersize_packets);
11839 ESTAT_ADD(rx_in_length_errors);
11840 ESTAT_ADD(rx_out_length_errors);
11841 ESTAT_ADD(rx_64_or_less_octet_packets);
11842 ESTAT_ADD(rx_65_to_127_octet_packets);
11843 ESTAT_ADD(rx_128_to_255_octet_packets);
11844 ESTAT_ADD(rx_256_to_511_octet_packets);
11845 ESTAT_ADD(rx_512_to_1023_octet_packets);
11846 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11847 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11848 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11849 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11850 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11852 ESTAT_ADD(tx_octets);
11853 ESTAT_ADD(tx_collisions);
11854 ESTAT_ADD(tx_xon_sent);
11855 ESTAT_ADD(tx_xoff_sent);
11856 ESTAT_ADD(tx_flow_control);
11857 ESTAT_ADD(tx_mac_errors);
11858 ESTAT_ADD(tx_single_collisions);
11859 ESTAT_ADD(tx_mult_collisions);
11860 ESTAT_ADD(tx_deferred);
11861 ESTAT_ADD(tx_excessive_collisions);
11862 ESTAT_ADD(tx_late_collisions);
11863 ESTAT_ADD(tx_collide_2times);
11864 ESTAT_ADD(tx_collide_3times);
11865 ESTAT_ADD(tx_collide_4times);
11866 ESTAT_ADD(tx_collide_5times);
11867 ESTAT_ADD(tx_collide_6times);
11868 ESTAT_ADD(tx_collide_7times);
11869 ESTAT_ADD(tx_collide_8times);
11870 ESTAT_ADD(tx_collide_9times);
11871 ESTAT_ADD(tx_collide_10times);
11872 ESTAT_ADD(tx_collide_11times);
11873 ESTAT_ADD(tx_collide_12times);
11874 ESTAT_ADD(tx_collide_13times);
11875 ESTAT_ADD(tx_collide_14times);
11876 ESTAT_ADD(tx_collide_15times);
11877 ESTAT_ADD(tx_ucast_packets);
11878 ESTAT_ADD(tx_mcast_packets);
11879 ESTAT_ADD(tx_bcast_packets);
11880 ESTAT_ADD(tx_carrier_sense_errors);
11881 ESTAT_ADD(tx_discards);
11882 ESTAT_ADD(tx_errors);
11884 ESTAT_ADD(dma_writeq_full);
11885 ESTAT_ADD(dma_write_prioq_full);
11886 ESTAT_ADD(rxbds_empty);
11887 ESTAT_ADD(rx_discards);
11888 ESTAT_ADD(rx_errors);
11889 ESTAT_ADD(rx_threshold_hit);
11891 ESTAT_ADD(dma_readq_full);
11892 ESTAT_ADD(dma_read_prioq_full);
11893 ESTAT_ADD(tx_comp_queue_full);
11895 ESTAT_ADD(ring_set_send_prod_index);
11896 ESTAT_ADD(ring_status_update);
11897 ESTAT_ADD(nic_irqs);
11898 ESTAT_ADD(nic_avoided_irqs);
11899 ESTAT_ADD(nic_tx_threshold_hit);
11901 ESTAT_ADD(mbuf_lwm_thresh_hit);
11904 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11906 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11907 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11909 stats->rx_packets = old_stats->rx_packets +
11910 get_stat64(&hw_stats->rx_ucast_packets) +
11911 get_stat64(&hw_stats->rx_mcast_packets) +
11912 get_stat64(&hw_stats->rx_bcast_packets);
11914 stats->tx_packets = old_stats->tx_packets +
11915 get_stat64(&hw_stats->tx_ucast_packets) +
11916 get_stat64(&hw_stats->tx_mcast_packets) +
11917 get_stat64(&hw_stats->tx_bcast_packets);
11919 stats->rx_bytes = old_stats->rx_bytes +
11920 get_stat64(&hw_stats->rx_octets);
11921 stats->tx_bytes = old_stats->tx_bytes +
11922 get_stat64(&hw_stats->tx_octets);
11924 stats->rx_errors = old_stats->rx_errors +
11925 get_stat64(&hw_stats->rx_errors);
11926 stats->tx_errors = old_stats->tx_errors +
11927 get_stat64(&hw_stats->tx_errors) +
11928 get_stat64(&hw_stats->tx_mac_errors) +
11929 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11930 get_stat64(&hw_stats->tx_discards);
11932 stats->multicast = old_stats->multicast +
11933 get_stat64(&hw_stats->rx_mcast_packets);
11934 stats->collisions = old_stats->collisions +
11935 get_stat64(&hw_stats->tx_collisions);
11937 stats->rx_length_errors = old_stats->rx_length_errors +
11938 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11939 get_stat64(&hw_stats->rx_undersize_packets);
11941 stats->rx_frame_errors = old_stats->rx_frame_errors +
11942 get_stat64(&hw_stats->rx_align_errors);
11943 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11944 get_stat64(&hw_stats->tx_discards);
11945 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11946 get_stat64(&hw_stats->tx_carrier_sense_errors);
11948 stats->rx_crc_errors = old_stats->rx_crc_errors +
11949 tg3_calc_crc_errors(tp);
11951 stats->rx_missed_errors = old_stats->rx_missed_errors +
11952 get_stat64(&hw_stats->rx_discards);
11954 stats->rx_dropped = tp->rx_dropped;
11955 stats->tx_dropped = tp->tx_dropped;
11958 static int tg3_get_regs_len(struct net_device *dev)
11960 return TG3_REG_BLK_SIZE;
11963 static void tg3_get_regs(struct net_device *dev,
11964 struct ethtool_regs *regs, void *_p)
11966 struct tg3 *tp = netdev_priv(dev);
11970 memset(_p, 0, TG3_REG_BLK_SIZE);
11972 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11975 tg3_full_lock(tp, 0);
11977 tg3_dump_legacy_regs(tp, (u32 *)_p);
11979 tg3_full_unlock(tp);
11982 static int tg3_get_eeprom_len(struct net_device *dev)
11984 struct tg3 *tp = netdev_priv(dev);
11986 return tp->nvram_size;
11989 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11991 struct tg3 *tp = netdev_priv(dev);
11992 int ret, cpmu_restore = 0;
11994 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11997 if (tg3_flag(tp, NO_NVRAM))
12000 offset = eeprom->offset;
12004 eeprom->magic = TG3_EEPROM_MAGIC;
12006 /* Override clock, link aware and link idle modes */
12007 if (tg3_flag(tp, CPMU_PRESENT)) {
12008 cpmu_val = tr32(TG3_CPMU_CTRL);
12009 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12010 CPMU_CTRL_LINK_IDLE_MODE)) {
12011 tw32(TG3_CPMU_CTRL, cpmu_val &
12012 ~(CPMU_CTRL_LINK_AWARE_MODE |
12013 CPMU_CTRL_LINK_IDLE_MODE));
12017 tg3_override_clk(tp);
12020 /* adjustments to start on required 4 byte boundary */
12021 b_offset = offset & 3;
12022 b_count = 4 - b_offset;
12023 if (b_count > len) {
12024 /* i.e. offset=1 len=2 */
12027 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12030 memcpy(data, ((char *)&val) + b_offset, b_count);
12033 eeprom->len += b_count;
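/* Worked example of the head handling above (the in-code "offset=1
 * len=2" case): b_offset=1 and b_count=4-1=3, which is clamped to
 * len=2; the aligned word at offset 0 is read and bytes 1..2 of it
 * are copied out, leaving nothing further to read.
 */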
12036 /* read bytes up to the last 4 byte boundary */
12037 pd = &data[eeprom->len];
12038 for (i = 0; i < (len - (len & 3)); i += 4) {
12039 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12046 memcpy(pd + i, &val, 4);
12047 if (need_resched()) {
12048 if (signal_pending(current)) {
12059 /* read last bytes not ending on 4 byte boundary */
12060 pd = &data[eeprom->len];
12062 b_offset = offset + len - b_count;
12063 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12066 memcpy(pd, &val, b_count);
12067 eeprom->len += b_count;
12072 /* Restore clock, link aware and link idle modes */
12073 tg3_restore_clk(tp);
12075 tw32(TG3_CPMU_CTRL, cpmu_val);
12080 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12082 struct tg3 *tp = netdev_priv(dev);
12084 u32 offset, len, b_offset, odd_len;
12086 __be32 start = 0, end;
12088 if (tg3_flag(tp, NO_NVRAM) ||
12089 eeprom->magic != TG3_EEPROM_MAGIC)
12092 offset = eeprom->offset;
12095 if ((b_offset = (offset & 3))) {
12096 /* adjustments to start on required 4 byte boundary */
12097 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12108 /* adjustments to end on required 4 byte boundary */
12110 len = (len + 3) & ~3;
12111 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12117 if (b_offset || odd_len) {
12118 buf = kmalloc(len, GFP_KERNEL);
12122 memcpy(buf, &start, 4);
12124 memcpy(buf+len-4, &end, 4);
12125 memcpy(buf + b_offset, data, eeprom->len);
12128 ret = tg3_nvram_write_block(tp, offset, len, buf);
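/* The write path is a read-modify-write on whole NVRAM words.  For
 * example, offset=5 len=6: b_offset=1 pulls the word at 4 into
 * 'start', the length is padded to 8 and the word at 8 is pulled into
 * 'end', the caller's 6 bytes land at buf+1, and an aligned 8-byte
 * block is written back at offset 4.
 */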
12136 static int tg3_get_link_ksettings(struct net_device *dev,
12137 struct ethtool_link_ksettings *cmd)
12139 struct tg3 *tp = netdev_priv(dev);
12140 u32 supported, advertising;
12142 if (tg3_flag(tp, USE_PHYLIB)) {
12143 struct phy_device *phydev;
12144 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12146 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12147 phy_ethtool_ksettings_get(phydev, cmd);
12152 supported = (SUPPORTED_Autoneg);
12154 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12155 supported |= (SUPPORTED_1000baseT_Half |
12156 SUPPORTED_1000baseT_Full);
12158 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12159 supported |= (SUPPORTED_100baseT_Half |
12160 SUPPORTED_100baseT_Full |
12161 SUPPORTED_10baseT_Half |
12162 SUPPORTED_10baseT_Full |
12164 cmd->base.port = PORT_TP;
12166 supported |= SUPPORTED_FIBRE;
12167 cmd->base.port = PORT_FIBRE;
12169 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12172 advertising = tp->link_config.advertising;
12173 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12174 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12175 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12176 advertising |= ADVERTISED_Pause;
12178 advertising |= ADVERTISED_Pause |
12179 ADVERTISED_Asym_Pause;
12181 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12182 advertising |= ADVERTISED_Asym_Pause;
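/* The bits above follow the IEEE 802.3 pause advertisement encoding:
 * symmetric (RX+TX) flow control advertises Pause alone, RX-only
 * advertises Pause plus Asym_Pause, and TX-only advertises Asym_Pause
 * alone.
 */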
12185 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12188 if (netif_running(dev) && tp->link_up) {
12189 cmd->base.speed = tp->link_config.active_speed;
12190 cmd->base.duplex = tp->link_config.active_duplex;
12191 ethtool_convert_legacy_u32_to_link_mode(
12192 cmd->link_modes.lp_advertising,
12193 tp->link_config.rmt_adv);
12195 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12196 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12197 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12199 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12202 cmd->base.speed = SPEED_UNKNOWN;
12203 cmd->base.duplex = DUPLEX_UNKNOWN;
12204 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12206 cmd->base.phy_address = tp->phy_addr;
12207 cmd->base.autoneg = tp->link_config.autoneg;
12211 static int tg3_set_link_ksettings(struct net_device *dev,
12212 const struct ethtool_link_ksettings *cmd)
12214 struct tg3 *tp = netdev_priv(dev);
12215 u32 speed = cmd->base.speed;
12218 if (tg3_flag(tp, USE_PHYLIB)) {
12219 struct phy_device *phydev;
12220 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12222 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12223 return phy_ethtool_ksettings_set(phydev, cmd);
12226 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12227 cmd->base.autoneg != AUTONEG_DISABLE)
12230 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12231 cmd->base.duplex != DUPLEX_FULL &&
12232 cmd->base.duplex != DUPLEX_HALF)
12235 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12236 cmd->link_modes.advertising);
12238 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12239 u32 mask = ADVERTISED_Autoneg |
12241 ADVERTISED_Asym_Pause;
12243 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12244 mask |= ADVERTISED_1000baseT_Half |
12245 ADVERTISED_1000baseT_Full;
12247 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12248 mask |= ADVERTISED_100baseT_Half |
12249 ADVERTISED_100baseT_Full |
12250 ADVERTISED_10baseT_Half |
12251 ADVERTISED_10baseT_Full |
12254 mask |= ADVERTISED_FIBRE;
12256 if (advertising & ~mask)
12259 mask &= (ADVERTISED_1000baseT_Half |
12260 ADVERTISED_1000baseT_Full |
12261 ADVERTISED_100baseT_Half |
12262 ADVERTISED_100baseT_Full |
12263 ADVERTISED_10baseT_Half |
12264 ADVERTISED_10baseT_Full);
12266 advertising &= mask;
12268 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12269 if (speed != SPEED_1000)
12272 if (cmd->base.duplex != DUPLEX_FULL)
12275 if (speed != SPEED_100 &&
12281 tg3_full_lock(tp, 0);
12283 tp->link_config.autoneg = cmd->base.autoneg;
12284 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12285 tp->link_config.advertising = (advertising |
12286 ADVERTISED_Autoneg);
12287 tp->link_config.speed = SPEED_UNKNOWN;
12288 tp->link_config.duplex = DUPLEX_UNKNOWN;
12290 tp->link_config.advertising = 0;
12291 tp->link_config.speed = speed;
12292 tp->link_config.duplex = cmd->base.duplex;
12295 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12297 tg3_warn_mgmt_link_flap(tp);
12299 if (netif_running(dev))
12300 tg3_setup_phy(tp, true);
12302 tg3_full_unlock(tp);
12307 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12309 struct tg3 *tp = netdev_priv(dev);
12311 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12312 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12313 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12314 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12317 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12319 struct tg3 *tp = netdev_priv(dev);
12321 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12322 wol->supported = WAKE_MAGIC;
12324 wol->supported = 0;
12326 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12327 wol->wolopts = WAKE_MAGIC;
12328 memset(&wol->sopass, 0, sizeof(wol->sopass));
12331 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12333 struct tg3 *tp = netdev_priv(dev);
12334 struct device *dp = &tp->pdev->dev;
12336 if (wol->wolopts & ~WAKE_MAGIC)
12338 if ((wol->wolopts & WAKE_MAGIC) &&
12339 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12342 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12344 if (device_may_wakeup(dp))
12345 tg3_flag_set(tp, WOL_ENABLE);
12347 tg3_flag_clear(tp, WOL_ENABLE);
12352 static u32 tg3_get_msglevel(struct net_device *dev)
12354 struct tg3 *tp = netdev_priv(dev);
12355 return tp->msg_enable;
12358 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12360 struct tg3 *tp = netdev_priv(dev);
12361 tp->msg_enable = value;
12364 static int tg3_nway_reset(struct net_device *dev)
12366 struct tg3 *tp = netdev_priv(dev);
12369 if (!netif_running(dev))
12372 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12375 tg3_warn_mgmt_link_flap(tp);
12377 if (tg3_flag(tp, USE_PHYLIB)) {
12378 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12380 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12384 spin_lock_bh(&tp->lock);
12386 tg3_readphy(tp, MII_BMCR, &bmcr);
12387 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12388 ((bmcr & BMCR_ANENABLE) ||
12389 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12390 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12394 spin_unlock_bh(&tp->lock);
12400 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12402 struct tg3 *tp = netdev_priv(dev);
12404 ering->rx_max_pending = tp->rx_std_ring_mask;
12405 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12406 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12408 ering->rx_jumbo_max_pending = 0;
12410 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12412 ering->rx_pending = tp->rx_pending;
12413 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12414 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12416 ering->rx_jumbo_pending = 0;
12418 ering->tx_pending = tp->napi[0].tx_pending;
12421 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12423 struct tg3 *tp = netdev_priv(dev);
12424 int i, irq_sync = 0, err = 0;
12426 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12427 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12428 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12429 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12430 (tg3_flag(tp, TSO_BUG) &&
12431 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12434 if (netif_running(dev)) {
12436 tg3_netif_stop(tp);
12440 tg3_full_lock(tp, irq_sync);
12442 tp->rx_pending = ering->rx_pending;
12444 if (tg3_flag(tp, MAX_RXPEND_64) &&
12445 tp->rx_pending > 63)
12446 tp->rx_pending = 63;
12448 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12449 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12451 for (i = 0; i < tp->irq_max; i++)
12452 tp->napi[i].tx_pending = ering->tx_pending;
12454 if (netif_running(dev)) {
12455 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12456 err = tg3_restart_hw(tp, false);
12458 tg3_netif_start(tp);
12461 tg3_full_unlock(tp);
12463 if (irq_sync && !err)
12469 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12471 struct tg3 *tp = netdev_priv(dev);
12473 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12475 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12476 epause->rx_pause = 1;
12478 epause->rx_pause = 0;
12480 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12481 epause->tx_pause = 1;
12483 epause->tx_pause = 0;
12486 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12488 struct tg3 *tp = netdev_priv(dev);
12491 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12492 tg3_warn_mgmt_link_flap(tp);
12494 if (tg3_flag(tp, USE_PHYLIB)) {
12495 struct phy_device *phydev;
12497 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12499 if (!phy_validate_pause(phydev, epause))
12502 tp->link_config.flowctrl = 0;
12503 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12504 if (epause->rx_pause) {
12505 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12507 if (epause->tx_pause) {
12508 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12510 } else if (epause->tx_pause) {
12511 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12514 if (epause->autoneg)
12515 tg3_flag_set(tp, PAUSE_AUTONEG);
12517 tg3_flag_clear(tp, PAUSE_AUTONEG);
12519 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12520 if (phydev->autoneg) {
12521 /* phy_set_asym_pause() will
12522 * renegotiate the link to inform our
12523 * link partner of our flow control
12524 * settings, even if the flow control
12525 * is forced. Let tg3_adjust_link()
12526 * do the final flow control setup.
12531 if (!epause->autoneg)
12532 tg3_setup_flow_control(tp, 0, 0);
12537 if (netif_running(dev)) {
12538 tg3_netif_stop(tp);
12542 tg3_full_lock(tp, irq_sync);
12544 if (epause->autoneg)
12545 tg3_flag_set(tp, PAUSE_AUTONEG);
12547 tg3_flag_clear(tp, PAUSE_AUTONEG);
12548 if (epause->rx_pause)
12549 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12551 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12552 if (epause->tx_pause)
12553 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12555 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12557 if (netif_running(dev)) {
12558 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12559 err = tg3_restart_hw(tp, false);
12561 tg3_netif_start(tp);
12564 tg3_full_unlock(tp);
12567 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12572 static int tg3_get_sset_count(struct net_device *dev, int sset)
12576 return TG3_NUM_TEST;
12578 return TG3_NUM_STATS;
12580 return -EOPNOTSUPP;
12584 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12585 u32 *rules __always_unused)
12587 struct tg3 *tp = netdev_priv(dev);
12589 if (!tg3_flag(tp, SUPPORT_MSIX))
12590 return -EOPNOTSUPP;
12592 switch (info->cmd) {
12593 case ETHTOOL_GRXRINGS:
12594 if (netif_running(tp->dev))
12595 info->data = tp->rxq_cnt;
12597 info->data = num_online_cpus();
12598 if (info->data > TG3_RSS_MAX_NUM_QS)
12599 info->data = TG3_RSS_MAX_NUM_QS;
12605 return -EOPNOTSUPP;
12609 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12612 struct tg3 *tp = netdev_priv(dev);
12614 if (tg3_flag(tp, SUPPORT_MSIX))
12615 size = TG3_RSS_INDIR_TBL_SIZE;
12620 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12622 struct tg3 *tp = netdev_priv(dev);
12626 *hfunc = ETH_RSS_HASH_TOP;
12630 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12631 indir[i] = tp->rss_ind_tbl[i];
12636 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12639 struct tg3 *tp = netdev_priv(dev);
12642 /* We require at least one supported parameter to be changed and no
12643 * change in any of the unsupported parameters
12646 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12647 return -EOPNOTSUPP;
12652 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12653 tp->rss_ind_tbl[i] = indir[i];
12655 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12658 /* It is legal to write the indirection
12659 * table while the device is running.
12661 tg3_full_lock(tp, 0);
12662 tg3_rss_write_indir_tbl(tp);
12663 tg3_full_unlock(tp);
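/* rss_ind_tbl[i] names the RX queue that hash bucket i is steered to.
 * Rewriting it on a live device is safe because a transient mix of old
 * and new entries only changes which queue a given flow lands on.
 */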
12668 static void tg3_get_channels(struct net_device *dev,
12669 struct ethtool_channels *channel)
12671 struct tg3 *tp = netdev_priv(dev);
12672 u32 deflt_qs = netif_get_num_default_rss_queues();
12674 channel->max_rx = tp->rxq_max;
12675 channel->max_tx = tp->txq_max;
12677 if (netif_running(dev)) {
12678 channel->rx_count = tp->rxq_cnt;
12679 channel->tx_count = tp->txq_cnt;
12682 channel->rx_count = tp->rxq_req;
12684 channel->rx_count = min(deflt_qs, tp->rxq_max);
12687 channel->tx_count = tp->txq_req;
12689 channel->tx_count = min(deflt_qs, tp->txq_max);
12693 static int tg3_set_channels(struct net_device *dev,
12694 struct ethtool_channels *channel)
12696 struct tg3 *tp = netdev_priv(dev);
12698 if (!tg3_flag(tp, SUPPORT_MSIX))
12699 return -EOPNOTSUPP;
12701 if (channel->rx_count > tp->rxq_max ||
12702 channel->tx_count > tp->txq_max)
12705 tp->rxq_req = channel->rx_count;
12706 tp->txq_req = channel->tx_count;
12708 if (!netif_running(dev))
12713 tg3_carrier_off(tp);
12715 tg3_start(tp, true, false, false);
12720 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12722 switch (stringset) {
12724 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12727 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12730 WARN_ON(1); /* we need a WARN() */
12735 static int tg3_set_phys_id(struct net_device *dev,
12736 enum ethtool_phys_id_state state)
12738 struct tg3 *tp = netdev_priv(dev);
12740 if (!netif_running(tp->dev))
12744 case ETHTOOL_ID_ACTIVE:
12745 return 1; /* cycle on/off once per second */
12747 case ETHTOOL_ID_ON:
12748 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12749 LED_CTRL_1000MBPS_ON |
12750 LED_CTRL_100MBPS_ON |
12751 LED_CTRL_10MBPS_ON |
12752 LED_CTRL_TRAFFIC_OVERRIDE |
12753 LED_CTRL_TRAFFIC_BLINK |
12754 LED_CTRL_TRAFFIC_LED);
12757 case ETHTOOL_ID_OFF:
12758 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12759 LED_CTRL_TRAFFIC_OVERRIDE);
12762 case ETHTOOL_ID_INACTIVE:
12763 tw32(MAC_LED_CTRL, tp->led_ctrl);
12770 static void tg3_get_ethtool_stats(struct net_device *dev,
12771 struct ethtool_stats *estats, u64 *tmp_stats)
12773 struct tg3 *tp = netdev_priv(dev);
12776 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12778 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12781 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12785 u32 offset = 0, len = 0;
12788 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12791 if (magic == TG3_EEPROM_MAGIC) {
12792 for (offset = TG3_NVM_DIR_START;
12793 offset < TG3_NVM_DIR_END;
12794 offset += TG3_NVM_DIRENT_SIZE) {
12795 if (tg3_nvram_read(tp, offset, &val))
12798 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12799 TG3_NVM_DIRTYPE_EXTVPD)
12803 if (offset != TG3_NVM_DIR_END) {
12804 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12805 if (tg3_nvram_read(tp, offset + 4, &offset))
12808 offset = tg3_nvram_logical_addr(tp, offset);
12812 if (!offset || !len) {
12813 offset = TG3_NVM_VPD_OFF;
12814 len = TG3_NVM_VPD_LEN;
12817 buf = kmalloc(len, GFP_KERNEL);
12821 if (magic == TG3_EEPROM_MAGIC) {
12822 for (i = 0; i < len; i += 4) {
12823 /* The data is in little-endian format in NVRAM.
12824 * Use the big-endian read routines to preserve
12825 * the byte order as it exists in NVRAM.
12827 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12833 unsigned int pos = 0;
12835 ptr = (u8 *)&buf[0];
12836 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12837 cnt = pci_read_vpd(tp->pdev, pos,
12839 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12857 #define NVRAM_TEST_SIZE 0x100
12858 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12859 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12860 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12861 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12862 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12863 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12864 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12865 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
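/* The selfboot sizes above give how many bytes of the NVRAM image each
 * format revision's checksum covers; tg3_test_nvram() below picks one
 * based on the magic/revision bits it reads at offset 0.
 */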
12867 static int tg3_test_nvram(struct tg3 *tp)
12869 u32 csum, magic, len;
12871 int i, j, k, err = 0, size;
12873 if (tg3_flag(tp, NO_NVRAM))
12876 if (tg3_nvram_read(tp, 0, &magic) != 0)
12879 if (magic == TG3_EEPROM_MAGIC)
12880 size = NVRAM_TEST_SIZE;
12881 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12882 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12883 TG3_EEPROM_SB_FORMAT_1) {
12884 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12885 case TG3_EEPROM_SB_REVISION_0:
12886 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12888 case TG3_EEPROM_SB_REVISION_2:
12889 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12891 case TG3_EEPROM_SB_REVISION_3:
12892 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12894 case TG3_EEPROM_SB_REVISION_4:
12895 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12897 case TG3_EEPROM_SB_REVISION_5:
12898 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12900 case TG3_EEPROM_SB_REVISION_6:
12901 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12908 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12909 size = NVRAM_SELFBOOT_HW_SIZE;
12913 buf = kmalloc(size, GFP_KERNEL);
12918 for (i = 0, j = 0; i < size; i += 4, j++) {
12919 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12926 /* Selfboot format */
12927 magic = be32_to_cpu(buf[0]);
12928 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12929 TG3_EEPROM_MAGIC_FW) {
12930 u8 *buf8 = (u8 *) buf, csum8 = 0;
12932 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12933 TG3_EEPROM_SB_REVISION_2) {
12934 /* For rev 2, the csum doesn't include the MBA. */
12935 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12937 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12940 for (i = 0; i < size; i++)
12953 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12954 TG3_EEPROM_MAGIC_HW) {
12955 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12956 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12957 u8 *buf8 = (u8 *) buf;
12959 /* Separate the parity bits and the data bytes. */
12960 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12961 if ((i == 0) || (i == 8)) {
12965 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12966 parity[k++] = buf8[i] & msk;
12968 } else if (i == 16) {
12972 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12973 parity[k++] = buf8[i] & msk;
12976 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12977 parity[k++] = buf8[i] & msk;
12980 data[j++] = buf8[i];
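/* Odd-parity check: a data byte with an odd number of set bits must
 * carry a clear parity bit, and one with an even count must carry a
 * set bit, so each byte-plus-parity pair always has odd total weight.
 */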
12984 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12985 u8 hw8 = hweight8(data[i]);
12987 if ((hw8 & 0x1) && parity[i])
12989 else if (!(hw8 & 0x1) && !parity[i])
12998 /* Bootstrap checksum at offset 0x10 */
12999 csum = calc_crc((unsigned char *) buf, 0x10);
13000 if (csum != le32_to_cpu(buf[0x10/4]))
13003 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13004 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13005 if (csum != le32_to_cpu(buf[0xfc/4]))
13010 buf = tg3_vpd_readblock(tp, &len);
13014 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13016 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13020 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13023 i += PCI_VPD_LRDT_TAG_SIZE;
13024 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13025 PCI_VPD_RO_KEYWORD_CHKSUM);
13029 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13031 for (i = 0; i <= j; i++)
13032 csum8 += ((u8 *)buf)[i];
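/* PCI VPD "RV" checksum convention: all bytes from the start of the
 * VPD data through the checksum byte itself must sum to zero mod 256,
 * so any non-zero csum8 means the block is corrupt.
 */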
13046 #define TG3_SERDES_TIMEOUT_SEC 2
13047 #define TG3_COPPER_TIMEOUT_SEC 6
13049 static int tg3_test_link(struct tg3 *tp)
13053 if (!netif_running(tp->dev))
13056 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13057 max = TG3_SERDES_TIMEOUT_SEC;
13059 max = TG3_COPPER_TIMEOUT_SEC;
13061 for (i = 0; i < max; i++) {
13065 if (msleep_interruptible(1000))
13072 /* Only test the commonly used registers */
13073 static int tg3_test_registers(struct tg3 *tp)
13075 int i, is_5705, is_5750;
13076 u32 offset, read_mask, write_mask, val, save_val, read_val;
13080 #define TG3_FL_5705 0x1
13081 #define TG3_FL_NOT_5705 0x2
13082 #define TG3_FL_NOT_5788 0x4
13083 #define TG3_FL_NOT_5750 0x8
13087 /* MAC Control Registers */
13088 { MAC_MODE, TG3_FL_NOT_5705,
13089 0x00000000, 0x00ef6f8c },
13090 { MAC_MODE, TG3_FL_5705,
13091 0x00000000, 0x01ef6b8c },
13092 { MAC_STATUS, TG3_FL_NOT_5705,
13093 0x03800107, 0x00000000 },
13094 { MAC_STATUS, TG3_FL_5705,
13095 0x03800100, 0x00000000 },
13096 { MAC_ADDR_0_HIGH, 0x0000,
13097 0x00000000, 0x0000ffff },
13098 { MAC_ADDR_0_LOW, 0x0000,
13099 0x00000000, 0xffffffff },
13100 { MAC_RX_MTU_SIZE, 0x0000,
13101 0x00000000, 0x0000ffff },
13102 { MAC_TX_MODE, 0x0000,
13103 0x00000000, 0x00000070 },
13104 { MAC_TX_LENGTHS, 0x0000,
13105 0x00000000, 0x00003fff },
13106 { MAC_RX_MODE, TG3_FL_NOT_5705,
13107 0x00000000, 0x000007fc },
13108 { MAC_RX_MODE, TG3_FL_5705,
13109 0x00000000, 0x000007dc },
13110 { MAC_HASH_REG_0, 0x0000,
13111 0x00000000, 0xffffffff },
13112 { MAC_HASH_REG_1, 0x0000,
13113 0x00000000, 0xffffffff },
13114 { MAC_HASH_REG_2, 0x0000,
13115 0x00000000, 0xffffffff },
13116 { MAC_HASH_REG_3, 0x0000,
13117 0x00000000, 0xffffffff },
13119 /* Receive Data and Receive BD Initiator Control Registers. */
13120 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13121 0x00000000, 0xffffffff },
13122 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13123 0x00000000, 0xffffffff },
13124 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13125 0x00000000, 0x00000003 },
13126 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13127 0x00000000, 0xffffffff },
13128 { RCVDBDI_STD_BD+0, 0x0000,
13129 0x00000000, 0xffffffff },
13130 { RCVDBDI_STD_BD+4, 0x0000,
13131 0x00000000, 0xffffffff },
13132 { RCVDBDI_STD_BD+8, 0x0000,
13133 0x00000000, 0xffff0002 },
13134 { RCVDBDI_STD_BD+0xc, 0x0000,
13135 0x00000000, 0xffffffff },
13137 /* Receive BD Initiator Control Registers. */
13138 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13139 0x00000000, 0xffffffff },
13140 { RCVBDI_STD_THRESH, TG3_FL_5705,
13141 0x00000000, 0x000003ff },
13142 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13143 0x00000000, 0xffffffff },
13145 /* Host Coalescing Control Registers. */
13146 { HOSTCC_MODE, TG3_FL_NOT_5705,
13147 0x00000000, 0x00000004 },
13148 { HOSTCC_MODE, TG3_FL_5705,
13149 0x00000000, 0x000000f6 },
13150 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13151 0x00000000, 0xffffffff },
13152 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13153 0x00000000, 0x000003ff },
13154 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13155 0x00000000, 0xffffffff },
13156 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13157 0x00000000, 0x000003ff },
13158 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13159 0x00000000, 0xffffffff },
13160 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13161 0x00000000, 0x000000ff },
13162 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13163 0x00000000, 0xffffffff },
13164 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13165 0x00000000, 0x000000ff },
13166 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13167 0x00000000, 0xffffffff },
13168 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13169 0x00000000, 0xffffffff },
13170 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13171 0x00000000, 0xffffffff },
13172 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13173 0x00000000, 0x000000ff },
13174 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13175 0x00000000, 0xffffffff },
13176 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13177 0x00000000, 0x000000ff },
13178 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13179 0x00000000, 0xffffffff },
13180 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13181 0x00000000, 0xffffffff },
13182 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13183 0x00000000, 0xffffffff },
13184 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13185 0x00000000, 0xffffffff },
13186 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13187 0x00000000, 0xffffffff },
13188 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13189 0xffffffff, 0x00000000 },
13190 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13191 0xffffffff, 0x00000000 },
13193 /* Buffer Manager Control Registers. */
13194 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13195 0x00000000, 0x007fff80 },
13196 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13197 0x00000000, 0x007fffff },
13198 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13199 0x00000000, 0x0000003f },
13200 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13201 0x00000000, 0x000001ff },
13202 { BUFMGR_MB_HIGH_WATER, 0x0000,
13203 0x00000000, 0x000001ff },
13204 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13205 0xffffffff, 0x00000000 },
13206 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13207 0xffffffff, 0x00000000 },
13209 /* Mailbox Registers */
13210 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13211 0x00000000, 0x000001ff },
13212 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13213 0x00000000, 0x000001ff },
13214 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13215 0x00000000, 0x000007ff },
13216 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13217 0x00000000, 0x000001ff },
13219 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13222 is_5705 = is_5750 = 0;
13223 if (tg3_flag(tp, 5705_PLUS)) {
13225 if (tg3_flag(tp, 5750_PLUS))
13229 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13230 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13233 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13236 if (tg3_flag(tp, IS_5788) &&
13237 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13240 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13243 offset = (u32) reg_tbl[i].offset;
13244 read_mask = reg_tbl[i].read_mask;
13245 write_mask = reg_tbl[i].write_mask;
13247 /* Save the original register content */
13248 save_val = tr32(offset);
13250 /* Determine the read-only value. */
13251 read_val = save_val & read_mask;
13253 /* Write zero to the register, then make sure the read-only bits
13254 * are not changed and the read/write bits are all zeros.
13258 val = tr32(offset);
13260 /* Test the read-only and read/write bits. */
13261 if (((val & read_mask) != read_val) || (val & write_mask))
13264 /* Write ones to all the bits defined by RdMask and WrMask, then
13265 * make sure the read-only bits are not changed and the
13266 * read/write bits are all ones.
13268 tw32(offset, read_mask | write_mask);
13270 val = tr32(offset);
13272 /* Test the read-only bits. */
13273 if ((val & read_mask) != read_val)
13276 /* Test the read/write bits. */
13277 if ((val & write_mask) != write_mask)
13280 tw32(offset, save_val);
13286 if (netif_msg_hw(tp))
13287 netdev_err(tp->dev,
13288 "Register test failed at offset %x\n", offset);
13289 tw32(offset, save_val);
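/* tg3_do_mem_test() below exercises a window of NIC-internal memory by
 * writing each test pattern (all-zeros, all-ones, 0xaa55a55a) word by
 * word via tg3_write_mem() and reading it straight back.
 */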
13293 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13295 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13299 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13300 for (j = 0; j < len; j += 4) {
13303 tg3_write_mem(tp, offset + j, test_pattern[i]);
13304 tg3_read_mem(tp, offset + j, &val);
13305 if (val != test_pattern[i])
13312 static int tg3_test_memory(struct tg3 *tp)
13314 static struct mem_entry {
13317 } mem_tbl_570x[] = {
13318 { 0x00000000, 0x00b50},
13319 { 0x00002000, 0x1c000},
13320 { 0xffffffff, 0x00000}
13321 }, mem_tbl_5705[] = {
13322 { 0x00000100, 0x0000c},
13323 { 0x00000200, 0x00008},
13324 { 0x00004000, 0x00800},
13325 { 0x00006000, 0x01000},
13326 { 0x00008000, 0x02000},
13327 { 0x00010000, 0x0e000},
13328 { 0xffffffff, 0x00000}
13329 }, mem_tbl_5755[] = {
13330 { 0x00000200, 0x00008},
13331 { 0x00004000, 0x00800},
13332 { 0x00006000, 0x00800},
13333 { 0x00008000, 0x02000},
13334 { 0x00010000, 0x0c000},
13335 { 0xffffffff, 0x00000}
13336 }, mem_tbl_5906[] = {
13337 { 0x00000200, 0x00008},
13338 { 0x00004000, 0x00400},
13339 { 0x00006000, 0x00400},
13340 { 0x00008000, 0x01000},
13341 { 0x00010000, 0x01000},
13342 { 0xffffffff, 0x00000}
13343 }, mem_tbl_5717[] = {
13344 { 0x00000200, 0x00008},
13345 { 0x00010000, 0x0a000},
13346 { 0x00020000, 0x13c00},
13347 { 0xffffffff, 0x00000}
13348 }, mem_tbl_57765[] = {
13349 { 0x00000200, 0x00008},
13350 { 0x00004000, 0x00800},
13351 { 0x00006000, 0x09800},
13352 { 0x00010000, 0x0a000},
13353 { 0xffffffff, 0x00000}
13355 struct mem_entry *mem_tbl;
13359 if (tg3_flag(tp, 5717_PLUS))
13360 mem_tbl = mem_tbl_5717;
13361 else if (tg3_flag(tp, 57765_CLASS) ||
13362 tg3_asic_rev(tp) == ASIC_REV_5762)
13363 mem_tbl = mem_tbl_57765;
13364 else if (tg3_flag(tp, 5755_PLUS))
13365 mem_tbl = mem_tbl_5755;
13366 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13367 mem_tbl = mem_tbl_5906;
13368 else if (tg3_flag(tp, 5705_PLUS))
13369 mem_tbl = mem_tbl_5705;
13371 mem_tbl = mem_tbl_570x;
13373 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13374 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13382 #define TG3_TSO_MSS 500
13384 #define TG3_TSO_IP_HDR_LEN 20
13385 #define TG3_TSO_TCP_HDR_LEN 20
13386 #define TG3_TSO_TCP_OPT_LEN 12
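/* Canned packet template for the TSO loopback test, copied in right
 * after the two MAC addresses: a 20-byte IPv4 header, a 20-byte TCP
 * header and a 12-byte timestamp option.  With TG3_TSO_MSS at 500 the
 * oversized frame has to be segmented before it comes back.
 */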
13388 static const u8 tg3_tso_header[] = {
13390 0x45, 0x00, 0x00, 0x00,
13391 0x00, 0x00, 0x40, 0x00,
13392 0x40, 0x06, 0x00, 0x00,
13393 0x0a, 0x00, 0x00, 0x01,
13394 0x0a, 0x00, 0x00, 0x02,
13395 0x0d, 0x00, 0xe0, 0x00,
13396 0x00, 0x00, 0x01, 0x00,
13397 0x00, 0x00, 0x02, 0x00,
13398 0x80, 0x10, 0x10, 0x00,
13399 0x14, 0x09, 0x00, 0x00,
13400 0x01, 0x01, 0x08, 0x0a,
13401 0x11, 0x11, 0x11, 0x11,
13402 0x11, 0x11, 0x11, 0x11,
13405 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13407 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13408 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13410 struct sk_buff *skb;
13411 u8 *tx_data, *rx_data;
13413 int num_pkts, tx_len, rx_len, i, err;
13414 struct tg3_rx_buffer_desc *desc;
13415 struct tg3_napi *tnapi, *rnapi;
13416 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13418 tnapi = &tp->napi[0];
13419 rnapi = &tp->napi[0];
13420 if (tp->irq_cnt > 1) {
13421 if (tg3_flag(tp, ENABLE_RSS))
13422 rnapi = &tp->napi[1];
13423 if (tg3_flag(tp, ENABLE_TSS))
13424 tnapi = &tp->napi[1];
13426 coal_now = tnapi->coal_now | rnapi->coal_now;
13431 skb = netdev_alloc_skb(tp->dev, tx_len);
13435 tx_data = skb_put(skb, tx_len);
13436 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13437 memset(tx_data + ETH_ALEN, 0x0, 8);
13439 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13441 if (tso_loopback) {
13442 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13444 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13445 TG3_TSO_TCP_OPT_LEN;
13447 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13448 sizeof(tg3_tso_header));
13451 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13452 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13454 /* Set the total length field in the IP header */
13455 iph->tot_len = htons((u16)(mss + hdr_len));
13457 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13458 TXD_FLAG_CPU_POST_DMA);
13460 if (tg3_flag(tp, HW_TSO_1) ||
13461 tg3_flag(tp, HW_TSO_2) ||
13462 tg3_flag(tp, HW_TSO_3)) {
13464 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13465 th = (struct tcphdr *)&tx_data[val];
13468 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13470 if (tg3_flag(tp, HW_TSO_3)) {
13471 mss |= (hdr_len & 0xc) << 12;
13472 if (hdr_len & 0x10)
13473 base_flags |= 0x00000010;
13474 base_flags |= (hdr_len & 0x3e0) << 5;
13475 } else if (tg3_flag(tp, HW_TSO_2))
13476 mss |= hdr_len << 9;
13477 else if (tg3_flag(tp, HW_TSO_1) ||
13478 tg3_asic_rev(tp) == ASIC_REV_5705) {
13479 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13481 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13484 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13487 data_off = ETH_HLEN;
13489 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13490 tx_len > VLAN_ETH_FRAME_LEN)
13491 base_flags |= TXD_FLAG_JMB_PKT;
13494 for (i = data_off; i < tx_len; i++)
13495 tx_data[i] = (u8) (i & 0xff);
13497 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13498 if (pci_dma_mapping_error(tp->pdev, map)) {
13499 dev_kfree_skb(skb);
13503 val = tnapi->tx_prod;
13504 tnapi->tx_buffers[val].skb = skb;
13505 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13507 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13512 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13514 budget = tg3_tx_avail(tnapi);
13515 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13516 base_flags | TXD_FLAG_END, mss, 0)) {
13517 tnapi->tx_buffers[val].skb = NULL;
13518 dev_kfree_skb(skb);
13524 /* Sync BD data before updating mailbox */
13527 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13528 tr32_mailbox(tnapi->prodmbox);
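/* Reading the producer mailbox back flushes the posted write out to
 * the chip before the polling loop below starts timing.
 */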
13532 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13533 for (i = 0; i < 35; i++) {
13534 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13539 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13540 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13541 if ((tx_idx == tnapi->tx_prod) &&
13542 (rx_idx == (rx_start_idx + num_pkts)))
13546 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13547 dev_kfree_skb(skb);
13549 if (tx_idx != tnapi->tx_prod)
13552 if (rx_idx != rx_start_idx + num_pkts)
13556 while (rx_idx != rx_start_idx) {
13557 desc = &rnapi->rx_rcb[rx_start_idx++];
13558 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13559 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13561 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13562 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13565 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13568 if (!tso_loopback) {
13569 if (rx_len != tx_len)
13572 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13573 if (opaque_key != RXD_OPAQUE_RING_STD)
13576 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13579 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13580 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13581 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13585 if (opaque_key == RXD_OPAQUE_RING_STD) {
13586 rx_data = tpr->rx_std_buffers[desc_idx].data;
13587 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13589 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13590 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13591 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13596 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13597 PCI_DMA_FROMDEVICE);
13599 rx_data += TG3_RX_OFFSET(tp);
13600 for (i = data_off; i < rx_len; i++, val++) {
13601 if (*(rx_data + i) != (u8) (val & 0xff))
13608 /* tg3_free_rings will unmap and free the rx_data */
13613 #define TG3_STD_LOOPBACK_FAILED 1
13614 #define TG3_JMB_LOOPBACK_FAILED 2
13615 #define TG3_TSO_LOOPBACK_FAILED 4
13616 #define TG3_LOOPBACK_FAILED \
13617 (TG3_STD_LOOPBACK_FAILED | \
13618 TG3_JMB_LOOPBACK_FAILED | \
13619 TG3_TSO_LOOPBACK_FAILED)
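/* Each loopback result word (MAC, PHY, external) accumulates the
 * per-variant failure bits above, so one value records independently
 * whether the standard, jumbo and TSO passes failed.
 */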
13621 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13625 u32 jmb_pkt_sz = 9000;
13628 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13630 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13631 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13633 if (!netif_running(tp->dev)) {
13634 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13635 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13637 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13641 err = tg3_reset_hw(tp, true);
13643 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13644 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13646 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13650 if (tg3_flag(tp, ENABLE_RSS)) {
13653 /* Reroute all rx packets to the 1st queue */
13654 for (i = MAC_RSS_INDIR_TBL_0;
13655 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13659 /* HW errata - mac loopback fails in some cases on 5780.
13660 * Normal traffic and PHY loopback are not affected by
13661 * errata. Also, the MAC loopback test is deprecated for
13662 * all newer ASIC revisions.
13664 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13665 !tg3_flag(tp, CPMU_PRESENT)) {
13666 tg3_mac_loopback(tp, true);
13668 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13669 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13671 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13672 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13673 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13675 tg3_mac_loopback(tp, false);
13678 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13679 !tg3_flag(tp, USE_PHYLIB)) {
13682 tg3_phy_lpbk_set(tp, 0, false);
13684 /* Wait for link */
13685 for (i = 0; i < 100; i++) {
13686 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13691 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13692 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13693 if (tg3_flag(tp, TSO_CAPABLE) &&
13694 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13695 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13696 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13697 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13698 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13701 tg3_phy_lpbk_set(tp, 0, true);
13703 /* All link indications report up, but the hardware
13704 * isn't really ready for about 20 msec.  Double it to be sure.
13709 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13710 data[TG3_EXT_LOOPB_TEST] |=
13711 TG3_STD_LOOPBACK_FAILED;
13712 if (tg3_flag(tp, TSO_CAPABLE) &&
13713 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13714 data[TG3_EXT_LOOPB_TEST] |=
13715 TG3_TSO_LOOPBACK_FAILED;
13716 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13717 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13718 data[TG3_EXT_LOOPB_TEST] |=
13719 TG3_JMB_LOOPBACK_FAILED;
13722 /* Re-enable gphy autopowerdown. */
13723 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13724 tg3_phy_toggle_apd(tp, true);
13727 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13728 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13731 tp->phy_flags |= eee_cap;
13736 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13739 struct tg3 *tp = netdev_priv(dev);
13740 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13742 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13743 if (tg3_power_up(tp)) {
13744 etest->flags |= ETH_TEST_FL_FAILED;
13745 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13748 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13751 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13753 if (tg3_test_nvram(tp) != 0) {
13754 etest->flags |= ETH_TEST_FL_FAILED;
13755 data[TG3_NVRAM_TEST] = 1;
13757 if (!doextlpbk && tg3_test_link(tp)) {
13758 etest->flags |= ETH_TEST_FL_FAILED;
13759 data[TG3_LINK_TEST] = 1;
13761 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13762 int err, err2 = 0, irq_sync = 0;
13764 if (netif_running(dev)) {
13766 tg3_netif_stop(tp);
13770 tg3_full_lock(tp, irq_sync);
13771 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13772 err = tg3_nvram_lock(tp);
13773 tg3_halt_cpu(tp, RX_CPU_BASE);
13774 if (!tg3_flag(tp, 5705_PLUS))
13775 tg3_halt_cpu(tp, TX_CPU_BASE);
13777 tg3_nvram_unlock(tp);
13779 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13782 if (tg3_test_registers(tp) != 0) {
13783 etest->flags |= ETH_TEST_FL_FAILED;
13784 data[TG3_REGISTER_TEST] = 1;
13787 if (tg3_test_memory(tp) != 0) {
13788 etest->flags |= ETH_TEST_FL_FAILED;
13789 data[TG3_MEMORY_TEST] = 1;
13793 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13795 if (tg3_test_loopback(tp, data, doextlpbk))
13796 etest->flags |= ETH_TEST_FL_FAILED;
13798 tg3_full_unlock(tp);
13800 if (tg3_test_interrupt(tp) != 0) {
13801 etest->flags |= ETH_TEST_FL_FAILED;
13802 data[TG3_INTERRUPT_TEST] = 1;
13805 tg3_full_lock(tp, 0);
13807 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13808 if (netif_running(dev)) {
13809 tg3_flag_set(tp, INIT_COMPLETE);
13810 err2 = tg3_restart_hw(tp, true);
13812 tg3_netif_start(tp);
13815 tg3_full_unlock(tp);
13817 if (irq_sync && !err2)
13820 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13821 tg3_power_down_prepare(tp);
13825 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13827 struct tg3 *tp = netdev_priv(dev);
13828 struct hwtstamp_config stmpconf;
13830 if (!tg3_flag(tp, PTP_CAPABLE))
13831 return -EOPNOTSUPP;
13833 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13836 if (stmpconf.flags)
13839 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13840 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13843 switch (stmpconf.rx_filter) {
13844 case HWTSTAMP_FILTER_NONE:
13847 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13848 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13849 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13851 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13852 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13853 TG3_RX_PTP_CTL_SYNC_EVNT;
13855 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13856 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13857 TG3_RX_PTP_CTL_DELAY_REQ;
13859 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13860 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13861 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13863 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13864 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13865 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13867 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13868 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13869 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13871 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13872 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13873 TG3_RX_PTP_CTL_SYNC_EVNT;
13875 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13876 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13877 TG3_RX_PTP_CTL_SYNC_EVNT;
13879 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13880 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13881 TG3_RX_PTP_CTL_SYNC_EVNT;
13883 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13884 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13885 TG3_RX_PTP_CTL_DELAY_REQ;
13887 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13888 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13889 TG3_RX_PTP_CTL_DELAY_REQ;
13891 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13892 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13893 TG3_RX_PTP_CTL_DELAY_REQ;
13899 if (netif_running(dev) && tp->rxptpctl)
13900 tw32(TG3_RX_PTP_CTL,
13901 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
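/* The chosen RX filter is cached in tp->rxptpctl and, when the
 * interface is up, written to the MAC right away; TX timestamping is
 * only a flag the transmit path checks when queuing packets.
 */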
13903 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13904 tg3_flag_set(tp, TX_TSTAMP_EN);
13906 tg3_flag_clear(tp, TX_TSTAMP_EN);
13908 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13912 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13914 struct tg3 *tp = netdev_priv(dev);
13915 struct hwtstamp_config stmpconf;
13917 if (!tg3_flag(tp, PTP_CAPABLE))
13918 return -EOPNOTSUPP;
13920 stmpconf.flags = 0;
13921 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13922 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13924 switch (tp->rxptpctl) {
13926 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13928 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13929 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13931 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13932 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13934 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13935 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13937 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13938 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13940 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13941 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13943 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13944 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13946 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13947 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13949 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13950 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13952 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13953 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13955 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13956 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13958 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13959 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13961 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13962 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13969 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13973 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13975 struct mii_ioctl_data *data = if_mii(ifr);
13976 struct tg3 *tp = netdev_priv(dev);
13979 if (tg3_flag(tp, USE_PHYLIB)) {
13980 struct phy_device *phydev;
13981 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13983 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13984 return phy_mii_ioctl(phydev, ifr, cmd);
13989 data->phy_id = tp->phy_addr;
13992 case SIOCGMIIREG: {
13995 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13996 break; /* We have no PHY */
13998 if (!netif_running(dev))
14001 spin_lock_bh(&tp->lock);
14002 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14003 data->reg_num & 0x1f, &mii_regval);
14004 spin_unlock_bh(&tp->lock);
14006 data->val_out = mii_regval;
14012 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14013 break; /* We have no PHY */
14015 if (!netif_running(dev))
14018 spin_lock_bh(&tp->lock);
14019 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14020 data->reg_num & 0x1f, data->val_in);
14021 spin_unlock_bh(&tp->lock);
14025 case SIOCSHWTSTAMP:
14026 return tg3_hwtstamp_set(dev, ifr);
14028 case SIOCGHWTSTAMP:
14029 return tg3_hwtstamp_get(dev, ifr);
14035 return -EOPNOTSUPP;
14038 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14040 struct tg3 *tp = netdev_priv(dev);
14042 memcpy(ec, &tp->coal, sizeof(*ec));
14046 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14048 struct tg3 *tp = netdev_priv(dev);
14049 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14050 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14052 if (!tg3_flag(tp, 5705_PLUS)) {
14053 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14054 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14055 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14056 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14059 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14060 (!ec->rx_coalesce_usecs) ||
14061 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14062 (!ec->tx_coalesce_usecs) ||
14063 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14064 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14065 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14066 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14067 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14068 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14069 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14070 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14073 /* Only copy relevant parameters, ignore all others. */
14074 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14075 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14076 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14077 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14078 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14079 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14080 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14081 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14082 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14084 if (netif_running(dev)) {
14085 tg3_full_lock(tp, 0);
14086 __tg3_set_coalesce(tp, &tp->coal);
14087 tg3_full_unlock(tp);
14088 }
14090 return 0;
14091 }
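/* Illustration: the bounds validated above correspond to what userspace can
 * request through ethtool's coalescing interface, e.g. (values chosen only
 * for illustration):
 *
 *	ethtool -C eth0 rx-usecs 60 rx-frames 32 tx-usecs 120
 *
 * Out-of-range requests are rejected with -EINVAL before tp->coal or any
 * hardware state is touched.
 */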
14092 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14094 struct tg3 *tp = netdev_priv(dev);
14096 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14097 netdev_warn(tp->dev, "Board does not support EEE!\n");
14098 return -EOPNOTSUPP;
14101 if (edata->advertised != tp->eee.advertised) {
14102 netdev_warn(tp->dev,
14103 "Direct manipulation of EEE advertisement is not supported\n");
14107 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14108 netdev_warn(tp->dev,
14109 "Maximal Tx LPI timer supported is %#x(u)\n",
14110 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14111 return -EINVAL;
14112 }
14114 tp->eee = *edata;
14116 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14117 tg3_warn_mgmt_link_flap(tp);
14119 if (netif_running(tp->dev)) {
14120 tg3_full_lock(tp, 0);
14123 tg3_full_unlock(tp);
14129 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14131 struct tg3 *tp = netdev_priv(dev);
14133 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14134 netdev_warn(tp->dev,
14135 "Board does not support EEE!\n");
14136 return -EOPNOTSUPP;
14137 }
14139 *edata = tp->eee;
14141 return 0;
14142 }
14143 static const struct ethtool_ops tg3_ethtool_ops = {
14144 .get_drvinfo = tg3_get_drvinfo,
14145 .get_regs_len = tg3_get_regs_len,
14146 .get_regs = tg3_get_regs,
14147 .get_wol = tg3_get_wol,
14148 .set_wol = tg3_set_wol,
14149 .get_msglevel = tg3_get_msglevel,
14150 .set_msglevel = tg3_set_msglevel,
14151 .nway_reset = tg3_nway_reset,
14152 .get_link = ethtool_op_get_link,
14153 .get_eeprom_len = tg3_get_eeprom_len,
14154 .get_eeprom = tg3_get_eeprom,
14155 .set_eeprom = tg3_set_eeprom,
14156 .get_ringparam = tg3_get_ringparam,
14157 .set_ringparam = tg3_set_ringparam,
14158 .get_pauseparam = tg3_get_pauseparam,
14159 .set_pauseparam = tg3_set_pauseparam,
14160 .self_test = tg3_self_test,
14161 .get_strings = tg3_get_strings,
14162 .set_phys_id = tg3_set_phys_id,
14163 .get_ethtool_stats = tg3_get_ethtool_stats,
14164 .get_coalesce = tg3_get_coalesce,
14165 .set_coalesce = tg3_set_coalesce,
14166 .get_sset_count = tg3_get_sset_count,
14167 .get_rxnfc = tg3_get_rxnfc,
14168 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14169 .get_rxfh = tg3_get_rxfh,
14170 .set_rxfh = tg3_set_rxfh,
14171 .get_channels = tg3_get_channels,
14172 .set_channels = tg3_set_channels,
14173 .get_ts_info = tg3_get_ts_info,
14174 .get_eee = tg3_get_eee,
14175 .set_eee = tg3_set_eee,
14176 .get_link_ksettings = tg3_get_link_ksettings,
14177 .set_link_ksettings = tg3_set_link_ksettings,
14180 static void tg3_get_stats64(struct net_device *dev,
14181 struct rtnl_link_stats64 *stats)
14183 struct tg3 *tp = netdev_priv(dev);
14185 spin_lock_bh(&tp->lock);
14186 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14187 *stats = tp->net_stats_prev;
14188 spin_unlock_bh(&tp->lock);
14189 return;
14190 }
14192 tg3_get_nstats(tp, stats);
14193 spin_unlock_bh(&tp->lock);
14196 static void tg3_set_rx_mode(struct net_device *dev)
14198 struct tg3 *tp = netdev_priv(dev);
14200 if (!netif_running(dev))
14201 return;
14203 tg3_full_lock(tp, 0);
14204 __tg3_set_rx_mode(dev);
14205 tg3_full_unlock(tp);
14208 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14209 int new_mtu)
14210 {
14211 dev->mtu = new_mtu;
14213 if (new_mtu > ETH_DATA_LEN) {
14214 if (tg3_flag(tp, 5780_CLASS)) {
14215 netdev_update_features(dev);
14216 tg3_flag_clear(tp, TSO_CAPABLE);
14218 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14221 if (tg3_flag(tp, 5780_CLASS)) {
14222 tg3_flag_set(tp, TSO_CAPABLE);
14223 netdev_update_features(dev);
14225 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14229 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14231 struct tg3 *tp = netdev_priv(dev);
14232 int err;
14233 bool reset_phy = false;
14235 if (!netif_running(dev)) {
14236 /* We'll just catch it later when the
14237 * device is up'ed.
14238 */
14239 tg3_set_mtu(dev, tp, new_mtu);
14240 return 0;
14241 }
14245 tg3_netif_stop(tp);
14247 tg3_set_mtu(dev, tp, new_mtu);
14249 tg3_full_lock(tp, 1);
14251 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14253 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14254 * breaks all requests to 256 bytes.
14255 */
14256 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14257 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14258 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14259 tg3_asic_rev(tp) == ASIC_REV_5720)
14260 reset_phy = true;
14262 err = tg3_restart_hw(tp, reset_phy);
14264 if (!err)
14265 tg3_netif_start(tp);
14267 tg3_full_unlock(tp);
14269 if (!err)
14270 tg3_phy_start(tp);
14272 return err;
14273 }
14275 static const struct net_device_ops tg3_netdev_ops = {
14276 .ndo_open = tg3_open,
14277 .ndo_stop = tg3_close,
14278 .ndo_start_xmit = tg3_start_xmit,
14279 .ndo_get_stats64 = tg3_get_stats64,
14280 .ndo_validate_addr = eth_validate_addr,
14281 .ndo_set_rx_mode = tg3_set_rx_mode,
14282 .ndo_set_mac_address = tg3_set_mac_addr,
14283 .ndo_do_ioctl = tg3_ioctl,
14284 .ndo_tx_timeout = tg3_tx_timeout,
14285 .ndo_change_mtu = tg3_change_mtu,
14286 .ndo_fix_features = tg3_fix_features,
14287 .ndo_set_features = tg3_set_features,
14288 #ifdef CONFIG_NET_POLL_CONTROLLER
14289 .ndo_poll_controller = tg3_poll_controller,
14290 #endif
14291 };
14293 static void tg3_get_eeprom_size(struct tg3 *tp)
14295 u32 cursize, val, magic;
14297 tp->nvram_size = EEPROM_CHIP_SIZE;
14299 if (tg3_nvram_read(tp, 0, &magic) != 0)
14300 return;
14302 if ((magic != TG3_EEPROM_MAGIC) &&
14303 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14304 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14305 return;
14307 /*
14308 * Size the chip by reading offsets at increasing powers of two.
14309 * When we encounter our validation signature, we know the addressing
14310 * has wrapped around, and thus have our chip size.
14311 */
14313 cursize = 0x10;
14314 while (cursize < tp->nvram_size) {
14315 if (tg3_nvram_read(tp, cursize, &val) != 0)
14316 return;
14318 if (val == magic)
14319 break;
14321 cursize <<= 1;
14322 }
14324 tp->nvram_size = cursize;
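/* A worked example of the sizing loop above (values for illustration): the
 * probe offset doubles from 0x10: 0x10, 0x20, ... On a 512-byte part the
 * reads up to offset 0x100 return ordinary data, while the read at 0x200
 * wraps back to offset 0 and returns the magic signature, so the loop
 * exits with tp->nvram_size = cursize = 0x200.
 */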
14327 static void tg3_get_nvram_size(struct tg3 *tp)
14331 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14332 return;
14334 /* Selfboot format */
14335 if (val != TG3_EEPROM_MAGIC) {
14336 tg3_get_eeprom_size(tp);
14337 return;
14338 }
14340 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14341 if (val != 0) {
14342 /* This is confusing. We want to operate on the
14343 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14344 * call will read from NVRAM and byteswap the data
14345 * according to the byteswapping settings for all
14346 * other register accesses. This ensures the data we
14347 * want will always reside in the lower 16-bits.
14348 * However, the data in NVRAM is in LE format, which
14349 * means the data from the NVRAM read will always be
14350 * opposite the endianness of the CPU. The 16-bit
14351 * byteswap then brings the data to CPU endianness.
14352 */
14353 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14354 return;
14355 }
14356 }
14357 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
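/* A concrete instance of the byte handling described above: swab16() swaps
 * the two bytes of the low half-word, e.g. swab16(0x0002) == 0x0200. So a
 * size field stored little-endian in NVRAM that arrives as 0x0002 on a
 * big-endian CPU is restored to 0x0200 and scaled to 0x0200 * 1024 bytes
 * (512 KiB). Example values chosen for illustration only.
 */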
14360 static void tg3_get_nvram_info(struct tg3 *tp)
14361 {
14362 u32 nvcfg1;
14364 nvcfg1 = tr32(NVRAM_CFG1);
14365 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14366 tg3_flag_set(tp, FLASH);
14367 } else {
14368 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14369 tw32(NVRAM_CFG1, nvcfg1);
14372 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14373 tg3_flag(tp, 5780_CLASS)) {
14374 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14375 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14376 tp->nvram_jedecnum = JEDEC_ATMEL;
14377 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14378 tg3_flag_set(tp, NVRAM_BUFFERED);
14380 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14381 tp->nvram_jedecnum = JEDEC_ATMEL;
14382 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14384 case FLASH_VENDOR_ATMEL_EEPROM:
14385 tp->nvram_jedecnum = JEDEC_ATMEL;
14386 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14387 tg3_flag_set(tp, NVRAM_BUFFERED);
14389 case FLASH_VENDOR_ST:
14390 tp->nvram_jedecnum = JEDEC_ST;
14391 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14392 tg3_flag_set(tp, NVRAM_BUFFERED);
14394 case FLASH_VENDOR_SAIFUN:
14395 tp->nvram_jedecnum = JEDEC_SAIFUN;
14396 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14398 case FLASH_VENDOR_SST_SMALL:
14399 case FLASH_VENDOR_SST_LARGE:
14400 tp->nvram_jedecnum = JEDEC_SST;
14401 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14403 }
14404 } else {
14405 tp->nvram_jedecnum = JEDEC_ATMEL;
14406 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14407 tg3_flag_set(tp, NVRAM_BUFFERED);
14411 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14413 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14414 case FLASH_5752PAGE_SIZE_256:
14415 tp->nvram_pagesize = 256;
14417 case FLASH_5752PAGE_SIZE_512:
14418 tp->nvram_pagesize = 512;
14420 case FLASH_5752PAGE_SIZE_1K:
14421 tp->nvram_pagesize = 1024;
14423 case FLASH_5752PAGE_SIZE_2K:
14424 tp->nvram_pagesize = 2048;
14426 case FLASH_5752PAGE_SIZE_4K:
14427 tp->nvram_pagesize = 4096;
14429 case FLASH_5752PAGE_SIZE_264:
14430 tp->nvram_pagesize = 264;
14432 case FLASH_5752PAGE_SIZE_528:
14433 tp->nvram_pagesize = 528;
14438 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14442 nvcfg1 = tr32(NVRAM_CFG1);
14444 /* NVRAM protection for TPM */
14445 if (nvcfg1 & (1 << 27))
14446 tg3_flag_set(tp, PROTECTED_NVRAM);
14448 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14449 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14450 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14451 tp->nvram_jedecnum = JEDEC_ATMEL;
14452 tg3_flag_set(tp, NVRAM_BUFFERED);
14454 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14455 tp->nvram_jedecnum = JEDEC_ATMEL;
14456 tg3_flag_set(tp, NVRAM_BUFFERED);
14457 tg3_flag_set(tp, FLASH);
14459 case FLASH_5752VENDOR_ST_M45PE10:
14460 case FLASH_5752VENDOR_ST_M45PE20:
14461 case FLASH_5752VENDOR_ST_M45PE40:
14462 tp->nvram_jedecnum = JEDEC_ST;
14463 tg3_flag_set(tp, NVRAM_BUFFERED);
14464 tg3_flag_set(tp, FLASH);
14468 if (tg3_flag(tp, FLASH)) {
14469 tg3_nvram_get_pagesize(tp, nvcfg1);
14470 } else {
14471 /* For eeprom, set pagesize to maximum eeprom size */
14472 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14474 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14475 tw32(NVRAM_CFG1, nvcfg1);
14479 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14481 u32 nvcfg1, protect = 0;
14483 nvcfg1 = tr32(NVRAM_CFG1);
14485 /* NVRAM protection for TPM */
14486 if (nvcfg1 & (1 << 27)) {
14487 tg3_flag_set(tp, PROTECTED_NVRAM);
14488 protect = 1;
14489 }
14491 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14492 switch (nvcfg1) {
14493 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14494 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14495 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14496 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14497 tp->nvram_jedecnum = JEDEC_ATMEL;
14498 tg3_flag_set(tp, NVRAM_BUFFERED);
14499 tg3_flag_set(tp, FLASH);
14500 tp->nvram_pagesize = 264;
14501 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14502 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14503 tp->nvram_size = (protect ? 0x3e200 :
14504 TG3_NVRAM_SIZE_512KB);
14505 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14506 tp->nvram_size = (protect ? 0x1f200 :
14507 TG3_NVRAM_SIZE_256KB);
14509 tp->nvram_size = (protect ? 0x1f200 :
14510 TG3_NVRAM_SIZE_128KB);
14512 case FLASH_5752VENDOR_ST_M45PE10:
14513 case FLASH_5752VENDOR_ST_M45PE20:
14514 case FLASH_5752VENDOR_ST_M45PE40:
14515 tp->nvram_jedecnum = JEDEC_ST;
14516 tg3_flag_set(tp, NVRAM_BUFFERED);
14517 tg3_flag_set(tp, FLASH);
14518 tp->nvram_pagesize = 256;
14519 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14520 tp->nvram_size = (protect ?
14521 TG3_NVRAM_SIZE_64KB :
14522 TG3_NVRAM_SIZE_128KB);
14523 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14524 tp->nvram_size = (protect ?
14525 TG3_NVRAM_SIZE_64KB :
14526 TG3_NVRAM_SIZE_256KB);
14528 tp->nvram_size = (protect ?
14529 TG3_NVRAM_SIZE_128KB :
14530 TG3_NVRAM_SIZE_512KB);
14535 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14539 nvcfg1 = tr32(NVRAM_CFG1);
14541 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14542 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14543 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14544 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14545 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14546 tp->nvram_jedecnum = JEDEC_ATMEL;
14547 tg3_flag_set(tp, NVRAM_BUFFERED);
14548 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14550 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14551 tw32(NVRAM_CFG1, nvcfg1);
14553 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14554 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14555 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14556 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14557 tp->nvram_jedecnum = JEDEC_ATMEL;
14558 tg3_flag_set(tp, NVRAM_BUFFERED);
14559 tg3_flag_set(tp, FLASH);
14560 tp->nvram_pagesize = 264;
14562 case FLASH_5752VENDOR_ST_M45PE10:
14563 case FLASH_5752VENDOR_ST_M45PE20:
14564 case FLASH_5752VENDOR_ST_M45PE40:
14565 tp->nvram_jedecnum = JEDEC_ST;
14566 tg3_flag_set(tp, NVRAM_BUFFERED);
14567 tg3_flag_set(tp, FLASH);
14568 tp->nvram_pagesize = 256;
14573 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14575 u32 nvcfg1, protect = 0;
14577 nvcfg1 = tr32(NVRAM_CFG1);
14579 /* NVRAM protection for TPM */
14580 if (nvcfg1 & (1 << 27)) {
14581 tg3_flag_set(tp, PROTECTED_NVRAM);
14582 protect = 1;
14583 }
14585 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14586 switch (nvcfg1) {
14587 case FLASH_5761VENDOR_ATMEL_ADB021D:
14588 case FLASH_5761VENDOR_ATMEL_ADB041D:
14589 case FLASH_5761VENDOR_ATMEL_ADB081D:
14590 case FLASH_5761VENDOR_ATMEL_ADB161D:
14591 case FLASH_5761VENDOR_ATMEL_MDB021D:
14592 case FLASH_5761VENDOR_ATMEL_MDB041D:
14593 case FLASH_5761VENDOR_ATMEL_MDB081D:
14594 case FLASH_5761VENDOR_ATMEL_MDB161D:
14595 tp->nvram_jedecnum = JEDEC_ATMEL;
14596 tg3_flag_set(tp, NVRAM_BUFFERED);
14597 tg3_flag_set(tp, FLASH);
14598 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14599 tp->nvram_pagesize = 256;
14601 case FLASH_5761VENDOR_ST_A_M45PE20:
14602 case FLASH_5761VENDOR_ST_A_M45PE40:
14603 case FLASH_5761VENDOR_ST_A_M45PE80:
14604 case FLASH_5761VENDOR_ST_A_M45PE16:
14605 case FLASH_5761VENDOR_ST_M_M45PE20:
14606 case FLASH_5761VENDOR_ST_M_M45PE40:
14607 case FLASH_5761VENDOR_ST_M_M45PE80:
14608 case FLASH_5761VENDOR_ST_M_M45PE16:
14609 tp->nvram_jedecnum = JEDEC_ST;
14610 tg3_flag_set(tp, NVRAM_BUFFERED);
14611 tg3_flag_set(tp, FLASH);
14612 tp->nvram_pagesize = 256;
14616 if (protect) {
14617 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14618 } else {
14619 switch (nvcfg1) {
14620 case FLASH_5761VENDOR_ATMEL_ADB161D:
14621 case FLASH_5761VENDOR_ATMEL_MDB161D:
14622 case FLASH_5761VENDOR_ST_A_M45PE16:
14623 case FLASH_5761VENDOR_ST_M_M45PE16:
14624 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14626 case FLASH_5761VENDOR_ATMEL_ADB081D:
14627 case FLASH_5761VENDOR_ATMEL_MDB081D:
14628 case FLASH_5761VENDOR_ST_A_M45PE80:
14629 case FLASH_5761VENDOR_ST_M_M45PE80:
14630 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14632 case FLASH_5761VENDOR_ATMEL_ADB041D:
14633 case FLASH_5761VENDOR_ATMEL_MDB041D:
14634 case FLASH_5761VENDOR_ST_A_M45PE40:
14635 case FLASH_5761VENDOR_ST_M_M45PE40:
14636 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14638 case FLASH_5761VENDOR_ATMEL_ADB021D:
14639 case FLASH_5761VENDOR_ATMEL_MDB021D:
14640 case FLASH_5761VENDOR_ST_A_M45PE20:
14641 case FLASH_5761VENDOR_ST_M_M45PE20:
14642 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14648 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14650 tp->nvram_jedecnum = JEDEC_ATMEL;
14651 tg3_flag_set(tp, NVRAM_BUFFERED);
14652 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14655 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14659 nvcfg1 = tr32(NVRAM_CFG1);
14661 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14662 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14663 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14664 tp->nvram_jedecnum = JEDEC_ATMEL;
14665 tg3_flag_set(tp, NVRAM_BUFFERED);
14666 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14668 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14669 tw32(NVRAM_CFG1, nvcfg1);
14671 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14672 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14673 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14674 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14675 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14676 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14677 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14678 tp->nvram_jedecnum = JEDEC_ATMEL;
14679 tg3_flag_set(tp, NVRAM_BUFFERED);
14680 tg3_flag_set(tp, FLASH);
14682 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14683 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14684 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14685 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14686 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14688 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14689 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14690 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14692 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14693 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14694 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14698 case FLASH_5752VENDOR_ST_M45PE10:
14699 case FLASH_5752VENDOR_ST_M45PE20:
14700 case FLASH_5752VENDOR_ST_M45PE40:
14701 tp->nvram_jedecnum = JEDEC_ST;
14702 tg3_flag_set(tp, NVRAM_BUFFERED);
14703 tg3_flag_set(tp, FLASH);
14705 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14706 case FLASH_5752VENDOR_ST_M45PE10:
14707 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14709 case FLASH_5752VENDOR_ST_M45PE20:
14710 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14712 case FLASH_5752VENDOR_ST_M45PE40:
14713 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14717 default:
14718 tg3_flag_set(tp, NO_NVRAM);
14719 return;
14720 }
14722 tg3_nvram_get_pagesize(tp, nvcfg1);
14723 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14724 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
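/* Note on the 264/528-byte cases above: these are Atmel DataFlash
 * "power of two plus eight" page layouts, where a linear NVRAM offset has
 * to be split into a page index and an in-page offset before it reaches
 * the device (e.g. offset 1000 with 264-byte pages is page 3, byte 208,
 * since 3 * 264 = 792 and 1000 - 792 = 208). Plain power-of-two pages
 * need no such translation, which is what NO_NVRAM_ADDR_TRANS records in
 * these probe routines.
 */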
14728 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14732 nvcfg1 = tr32(NVRAM_CFG1);
14734 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14735 case FLASH_5717VENDOR_ATMEL_EEPROM:
14736 case FLASH_5717VENDOR_MICRO_EEPROM:
14737 tp->nvram_jedecnum = JEDEC_ATMEL;
14738 tg3_flag_set(tp, NVRAM_BUFFERED);
14739 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14741 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14742 tw32(NVRAM_CFG1, nvcfg1);
14744 case FLASH_5717VENDOR_ATMEL_MDB011D:
14745 case FLASH_5717VENDOR_ATMEL_ADB011B:
14746 case FLASH_5717VENDOR_ATMEL_ADB011D:
14747 case FLASH_5717VENDOR_ATMEL_MDB021D:
14748 case FLASH_5717VENDOR_ATMEL_ADB021B:
14749 case FLASH_5717VENDOR_ATMEL_ADB021D:
14750 case FLASH_5717VENDOR_ATMEL_45USPT:
14751 tp->nvram_jedecnum = JEDEC_ATMEL;
14752 tg3_flag_set(tp, NVRAM_BUFFERED);
14753 tg3_flag_set(tp, FLASH);
14755 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14756 case FLASH_5717VENDOR_ATMEL_MDB021D:
14757 /* Detect size with tg3_nvram_get_size() */
14759 case FLASH_5717VENDOR_ATMEL_ADB021B:
14760 case FLASH_5717VENDOR_ATMEL_ADB021D:
14761 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14763 default:
14764 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14768 case FLASH_5717VENDOR_ST_M_M25PE10:
14769 case FLASH_5717VENDOR_ST_A_M25PE10:
14770 case FLASH_5717VENDOR_ST_M_M45PE10:
14771 case FLASH_5717VENDOR_ST_A_M45PE10:
14772 case FLASH_5717VENDOR_ST_M_M25PE20:
14773 case FLASH_5717VENDOR_ST_A_M25PE20:
14774 case FLASH_5717VENDOR_ST_M_M45PE20:
14775 case FLASH_5717VENDOR_ST_A_M45PE20:
14776 case FLASH_5717VENDOR_ST_25USPT:
14777 case FLASH_5717VENDOR_ST_45USPT:
14778 tp->nvram_jedecnum = JEDEC_ST;
14779 tg3_flag_set(tp, NVRAM_BUFFERED);
14780 tg3_flag_set(tp, FLASH);
14782 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14783 case FLASH_5717VENDOR_ST_M_M25PE20:
14784 case FLASH_5717VENDOR_ST_M_M45PE20:
14785 /* Detect size with tg3_nvram_get_size() */
14787 case FLASH_5717VENDOR_ST_A_M25PE20:
14788 case FLASH_5717VENDOR_ST_A_M45PE20:
14789 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14791 default:
14792 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14796 default:
14797 tg3_flag_set(tp, NO_NVRAM);
14798 return;
14799 }
14801 tg3_nvram_get_pagesize(tp, nvcfg1);
14802 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14803 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14806 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14808 u32 nvcfg1, nvmpinstrp, nv_status;
14810 nvcfg1 = tr32(NVRAM_CFG1);
14811 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14813 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14814 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14815 tg3_flag_set(tp, NO_NVRAM);
14816 return;
14817 }
14819 switch (nvmpinstrp) {
14820 case FLASH_5762_MX25L_100:
14821 case FLASH_5762_MX25L_200:
14822 case FLASH_5762_MX25L_400:
14823 case FLASH_5762_MX25L_800:
14824 case FLASH_5762_MX25L_160_320:
14825 tp->nvram_pagesize = 4096;
14826 tp->nvram_jedecnum = JEDEC_MACRONIX;
14827 tg3_flag_set(tp, NVRAM_BUFFERED);
14828 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14829 tg3_flag_set(tp, FLASH);
14830 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14831 tp->nvram_size =
14832 (1 << (nv_status >> AUTOSENSE_DEVID &
14833 AUTOSENSE_DEVID_MASK)
14834 << AUTOSENSE_SIZE_IN_MB);
14835 return;
14837 case FLASH_5762_EEPROM_HD:
14838 nvmpinstrp = FLASH_5720_EEPROM_HD;
14840 case FLASH_5762_EEPROM_LD:
14841 nvmpinstrp = FLASH_5720_EEPROM_LD;
14843 case FLASH_5720VENDOR_M_ST_M45PE20:
14844 /* This pinstrap supports multiple sizes, so force it
14845 * to read the actual size from location 0xf0.
14846 */
14847 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14848 break;
14849 }
14850 }
14852 switch (nvmpinstrp) {
14853 case FLASH_5720_EEPROM_HD:
14854 case FLASH_5720_EEPROM_LD:
14855 tp->nvram_jedecnum = JEDEC_ATMEL;
14856 tg3_flag_set(tp, NVRAM_BUFFERED);
14858 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14859 tw32(NVRAM_CFG1, nvcfg1);
14860 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14861 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14863 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14865 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14866 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14867 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14868 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14869 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14870 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14871 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14872 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14873 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14874 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14875 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14876 case FLASH_5720VENDOR_ATMEL_45USPT:
14877 tp->nvram_jedecnum = JEDEC_ATMEL;
14878 tg3_flag_set(tp, NVRAM_BUFFERED);
14879 tg3_flag_set(tp, FLASH);
14881 switch (nvmpinstrp) {
14882 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14883 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14884 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14885 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14887 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14888 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14889 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14890 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14892 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14893 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14894 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14896 default:
14897 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14898 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14902 case FLASH_5720VENDOR_M_ST_M25PE10:
14903 case FLASH_5720VENDOR_M_ST_M45PE10:
14904 case FLASH_5720VENDOR_A_ST_M25PE10:
14905 case FLASH_5720VENDOR_A_ST_M45PE10:
14906 case FLASH_5720VENDOR_M_ST_M25PE20:
14907 case FLASH_5720VENDOR_M_ST_M45PE20:
14908 case FLASH_5720VENDOR_A_ST_M25PE20:
14909 case FLASH_5720VENDOR_A_ST_M45PE20:
14910 case FLASH_5720VENDOR_M_ST_M25PE40:
14911 case FLASH_5720VENDOR_M_ST_M45PE40:
14912 case FLASH_5720VENDOR_A_ST_M25PE40:
14913 case FLASH_5720VENDOR_A_ST_M45PE40:
14914 case FLASH_5720VENDOR_M_ST_M25PE80:
14915 case FLASH_5720VENDOR_M_ST_M45PE80:
14916 case FLASH_5720VENDOR_A_ST_M25PE80:
14917 case FLASH_5720VENDOR_A_ST_M45PE80:
14918 case FLASH_5720VENDOR_ST_25USPT:
14919 case FLASH_5720VENDOR_ST_45USPT:
14920 tp->nvram_jedecnum = JEDEC_ST;
14921 tg3_flag_set(tp, NVRAM_BUFFERED);
14922 tg3_flag_set(tp, FLASH);
14924 switch (nvmpinstrp) {
14925 case FLASH_5720VENDOR_M_ST_M25PE20:
14926 case FLASH_5720VENDOR_M_ST_M45PE20:
14927 case FLASH_5720VENDOR_A_ST_M25PE20:
14928 case FLASH_5720VENDOR_A_ST_M45PE20:
14929 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14931 case FLASH_5720VENDOR_M_ST_M25PE40:
14932 case FLASH_5720VENDOR_M_ST_M45PE40:
14933 case FLASH_5720VENDOR_A_ST_M25PE40:
14934 case FLASH_5720VENDOR_A_ST_M45PE40:
14935 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14937 case FLASH_5720VENDOR_M_ST_M25PE80:
14938 case FLASH_5720VENDOR_M_ST_M45PE80:
14939 case FLASH_5720VENDOR_A_ST_M25PE80:
14940 case FLASH_5720VENDOR_A_ST_M45PE80:
14941 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14943 default:
14944 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14945 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14949 default:
14950 tg3_flag_set(tp, NO_NVRAM);
14951 return;
14952 }
14954 tg3_nvram_get_pagesize(tp, nvcfg1);
14955 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14956 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14958 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14959 u32 val;
14961 if (tg3_nvram_read(tp, 0, &val))
14962 return;
14964 if (val != TG3_EEPROM_MAGIC &&
14965 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14966 tg3_flag_set(tp, NO_NVRAM);
14970 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14971 static void tg3_nvram_init(struct tg3 *tp)
14973 if (tg3_flag(tp, IS_SSB_CORE)) {
14974 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14975 tg3_flag_clear(tp, NVRAM);
14976 tg3_flag_clear(tp, NVRAM_BUFFERED);
14977 tg3_flag_set(tp, NO_NVRAM);
14978 return;
14979 }
14981 tw32_f(GRC_EEPROM_ADDR,
14982 (EEPROM_ADDR_FSM_RESET |
14983 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14984 EEPROM_ADDR_CLKPERD_SHIFT)));
14986 msleep(1);
14988 /* Enable seeprom accesses. */
14989 tw32_f(GRC_LOCAL_CTRL,
14990 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14993 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14994 tg3_asic_rev(tp) != ASIC_REV_5701) {
14995 tg3_flag_set(tp, NVRAM);
14997 if (tg3_nvram_lock(tp)) {
14998 netdev_warn(tp->dev,
14999 "Cannot get nvram lock, %s failed\n",
15003 tg3_enable_nvram_access(tp);
15005 tp->nvram_size = 0;
15007 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15008 tg3_get_5752_nvram_info(tp);
15009 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15010 tg3_get_5755_nvram_info(tp);
15011 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15012 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15013 tg3_asic_rev(tp) == ASIC_REV_5785)
15014 tg3_get_5787_nvram_info(tp);
15015 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15016 tg3_get_5761_nvram_info(tp);
15017 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15018 tg3_get_5906_nvram_info(tp);
15019 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15020 tg3_flag(tp, 57765_CLASS))
15021 tg3_get_57780_nvram_info(tp);
15022 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15023 tg3_asic_rev(tp) == ASIC_REV_5719)
15024 tg3_get_5717_nvram_info(tp);
15025 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15026 tg3_asic_rev(tp) == ASIC_REV_5762)
15027 tg3_get_5720_nvram_info(tp);
15028 else
15029 tg3_get_nvram_info(tp);
15031 if (tp->nvram_size == 0)
15032 tg3_get_nvram_size(tp);
15034 tg3_disable_nvram_access(tp);
15035 tg3_nvram_unlock(tp);
15036 } else {
15038 tg3_flag_clear(tp, NVRAM);
15039 tg3_flag_clear(tp, NVRAM_BUFFERED);
15041 tg3_get_eeprom_size(tp);
15045 struct subsys_tbl_ent {
15046 u16 subsys_vendor, subsys_devid;
15047 u32 phy_id;
15048 };
15050 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15051 /* Broadcom boards. */
15052 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15053 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15054 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15055 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15056 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15057 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15058 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15059 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15060 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15061 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15062 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15063 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15064 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15065 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15066 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15067 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15068 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15069 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15070 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15071 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15072 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15073 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15076 { TG3PCI_SUBVENDOR_ID_3COM,
15077 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15078 { TG3PCI_SUBVENDOR_ID_3COM,
15079 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15080 { TG3PCI_SUBVENDOR_ID_3COM,
15081 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15082 { TG3PCI_SUBVENDOR_ID_3COM,
15083 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15084 { TG3PCI_SUBVENDOR_ID_3COM,
15085 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15088 { TG3PCI_SUBVENDOR_ID_DELL,
15089 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15090 { TG3PCI_SUBVENDOR_ID_DELL,
15091 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15092 { TG3PCI_SUBVENDOR_ID_DELL,
15093 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15094 { TG3PCI_SUBVENDOR_ID_DELL,
15095 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15097 /* Compaq boards. */
15098 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15099 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15100 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15101 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15102 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15103 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15104 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15105 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15106 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15107 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15110 { TG3PCI_SUBVENDOR_ID_IBM,
15111 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15112 };
15114 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15115 {
15116 int i;
15118 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15119 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15120 tp->pdev->subsystem_vendor) &&
15121 (subsys_id_to_phy_id[i].subsys_devid ==
15122 tp->pdev->subsystem_device))
15123 return &subsys_id_to_phy_id[i];
15124 }
15126 return NULL;
15127 }
15128 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15132 tp->phy_id = TG3_PHY_ID_INVALID;
15133 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15135 /* Assume an onboard device and WOL capable by default. */
15136 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15137 tg3_flag_set(tp, WOL_CAP);
15139 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15140 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15141 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15142 tg3_flag_set(tp, IS_NIC);
15144 val = tr32(VCPU_CFGSHDW);
15145 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15146 tg3_flag_set(tp, ASPM_WORKAROUND);
15147 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15148 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15149 tg3_flag_set(tp, WOL_ENABLE);
15150 device_set_wakeup_enable(&tp->pdev->dev, true);
15151 }
15152 goto done;
15153 }
15155 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15156 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15157 u32 nic_cfg, led_cfg;
15158 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15159 u32 nic_phy_id, ver, eeprom_phy_id;
15160 int eeprom_phy_serdes = 0;
15162 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15163 tp->nic_sram_data_cfg = nic_cfg;
15165 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15166 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15167 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15168 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15169 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15170 (ver > 0) && (ver < 0x100))
15171 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15173 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15174 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15176 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15177 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15178 tg3_asic_rev(tp) == ASIC_REV_5720)
15179 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15181 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15182 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15183 eeprom_phy_serdes = 1;
15185 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15186 if (nic_phy_id != 0) {
15187 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15188 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15190 eeprom_phy_id = (id1 >> 16) << 10;
15191 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15192 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15193 } else
15194 eeprom_phy_id = 0;
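/* A worked example of the ID packing above, with made-up register halves.
 * For a PHYSID1-style id of 0x0020 and a PHYSID2-style id of 0x60b0:
 *
 *	(0x0020 << 10)          = 0x00008000
 *	(0x60b0 & 0xfc00) << 16 = 0x60000000
 *	(0x60b0 & 0x03ff)       = 0x000000b0
 *	OR'd together           = 0x600080b0
 *
 * which matches the format tg3_phy_probe() assembles from MII_PHYSID1/2;
 * here id1 is shifted down by 16 first only because the NVRAM word carries
 * it in its upper halfword.
 */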
15196 tp->phy_id = eeprom_phy_id;
15197 if (eeprom_phy_serdes) {
15198 if (!tg3_flag(tp, 5705_PLUS))
15199 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15200 else
15201 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15204 if (tg3_flag(tp, 5750_PLUS))
15205 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15206 SHASTA_EXT_LED_MODE_MASK);
15207 else
15208 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15210 switch (led_cfg) {
15211 default:
15212 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15213 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15216 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15217 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15220 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15221 tp->led_ctrl = LED_CTRL_MODE_MAC;
15223 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15224 * read on some older 5700/5701 bootcode.
15225 */
15226 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15227 tg3_asic_rev(tp) == ASIC_REV_5701)
15228 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15232 case SHASTA_EXT_LED_SHARED:
15233 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15234 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15235 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15236 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15237 LED_CTRL_MODE_PHY_2);
15239 if (tg3_flag(tp, 5717_PLUS) ||
15240 tg3_asic_rev(tp) == ASIC_REV_5762)
15241 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15242 LED_CTRL_BLINK_RATE_MASK;
15246 case SHASTA_EXT_LED_MAC:
15247 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15250 case SHASTA_EXT_LED_COMBO:
15251 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15252 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15253 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15254 LED_CTRL_MODE_PHY_2);
15259 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15260 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15261 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15262 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15264 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15265 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15267 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15268 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15269 if ((tp->pdev->subsystem_vendor ==
15270 PCI_VENDOR_ID_ARIMA) &&
15271 (tp->pdev->subsystem_device == 0x205a ||
15272 tp->pdev->subsystem_device == 0x2063))
15273 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15274 } else {
15275 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15276 tg3_flag_set(tp, IS_NIC);
15279 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15280 tg3_flag_set(tp, ENABLE_ASF);
15281 if (tg3_flag(tp, 5750_PLUS))
15282 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15285 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15286 tg3_flag(tp, 5750_PLUS))
15287 tg3_flag_set(tp, ENABLE_APE);
15289 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15290 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15291 tg3_flag_clear(tp, WOL_CAP);
15293 if (tg3_flag(tp, WOL_CAP) &&
15294 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15295 tg3_flag_set(tp, WOL_ENABLE);
15296 device_set_wakeup_enable(&tp->pdev->dev, true);
15299 if (cfg2 & (1 << 17))
15300 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15302 /* serdes signal pre-emphasis in register 0x590 set by */
15303 /* bootcode if bit 18 is set */
15304 if (cfg2 & (1 << 18))
15305 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15307 if ((tg3_flag(tp, 57765_PLUS) ||
15308 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15309 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15310 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15311 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15313 if (tg3_flag(tp, PCI_EXPRESS)) {
15314 u32 cfg3;
15316 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15317 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15318 !tg3_flag(tp, 57765_PLUS) &&
15319 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15320 tg3_flag_set(tp, ASPM_WORKAROUND);
15321 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15322 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15323 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15324 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15327 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15328 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15329 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15330 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15331 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15332 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15334 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15335 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15336 }
15337 done:
15338 if (tg3_flag(tp, WOL_CAP))
15339 device_set_wakeup_enable(&tp->pdev->dev,
15340 tg3_flag(tp, WOL_ENABLE));
15341 else
15342 device_set_wakeup_capable(&tp->pdev->dev, false);
15345 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15346 {
15347 int i, err;
15348 u32 val2, off = offset * 8;
15350 err = tg3_nvram_lock(tp);
15351 if (err)
15352 return err;
15354 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15355 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15356 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15357 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15358 udelay(100);
15360 for (i = 0; i < 100; i++) {
15361 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15362 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15363 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15364 break;
15365 }
15366 udelay(10);
15367 }
15369 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15371 tg3_nvram_unlock(tp);
15372 if (val2 & APE_OTP_STATUS_CMD_DONE)
15373 return 0;
15375 return -EBUSY;
15376 }
15378 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15379 {
15380 int i;
15381 u32 val;
15383 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15384 tw32(OTP_CTRL, cmd);
15386 /* Wait for up to 1 ms for command to execute. */
15387 for (i = 0; i < 100; i++) {
15388 val = tr32(OTP_STATUS);
15389 if (val & OTP_STATUS_CMD_DONE)
15390 break;
15391 udelay(10);
15392 }
15394 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
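/* The "up to 1 ms" budget quoted above is simply the poll-loop arithmetic:
 * 100 iterations x udelay(10) = 1000 usec.
 */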
15397 /* Read the gphy configuration from the OTP region of the chip. The gphy
15398 * configuration is a 32-bit value that straddles the alignment boundary.
15399 * We do two 32-bit reads and then shift and merge the results.
15400 */
15401 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15403 u32 bhalf_otp, thalf_otp;
15405 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15407 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15408 return 0;
15410 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15412 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15413 return 0;
15415 thalf_otp = tr32(OTP_READ_DATA);
15417 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15419 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15420 return 0;
15422 bhalf_otp = tr32(OTP_READ_DATA);
15424 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
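/* A worked example of the merge above, with made-up data: if the two
 * aligned reads return thalf_otp = 0xAAAABBBB and bhalf_otp = 0xCCCCDDDD,
 * the straddling gphy config word is
 *
 *	((0xAAAABBBB & 0x0000ffff) << 16) | (0xCCCCDDDD >> 16)
 *		= 0xBBBB0000 | 0x0000CCCC = 0xBBBBCCCC
 *
 * i.e. the low half of the first word joined to the high half of the second.
 */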
15427 static void tg3_phy_init_link_config(struct tg3 *tp)
15429 u32 adv = ADVERTISED_Autoneg;
15431 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15432 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15433 adv |= ADVERTISED_1000baseT_Half;
15434 adv |= ADVERTISED_1000baseT_Full;
15437 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15438 adv |= ADVERTISED_100baseT_Half |
15439 ADVERTISED_100baseT_Full |
15440 ADVERTISED_10baseT_Half |
15441 ADVERTISED_10baseT_Full |
15442 ADVERTISED_TP;
15443 else
15444 adv |= ADVERTISED_FIBRE;
15446 tp->link_config.advertising = adv;
15447 tp->link_config.speed = SPEED_UNKNOWN;
15448 tp->link_config.duplex = DUPLEX_UNKNOWN;
15449 tp->link_config.autoneg = AUTONEG_ENABLE;
15450 tp->link_config.active_speed = SPEED_UNKNOWN;
15451 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15456 static int tg3_phy_probe(struct tg3 *tp)
15458 u32 hw_phy_id_1, hw_phy_id_2;
15459 u32 hw_phy_id, hw_phy_id_masked;
15460 int err = 0;
15462 /* flow control autonegotiation is default behavior */
15463 tg3_flag_set(tp, PAUSE_AUTONEG);
15464 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15466 if (tg3_flag(tp, ENABLE_APE)) {
15467 switch (tp->pci_fn) {
15469 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15472 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15475 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15478 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15483 if (!tg3_flag(tp, ENABLE_ASF) &&
15484 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15485 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15486 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15487 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15489 if (tg3_flag(tp, USE_PHYLIB))
15490 return tg3_phy_init(tp);
15492 /* Reading the PHY ID register can conflict with ASF
15493 * firmware access to the PHY hardware.
15494 */
15496 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15497 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15498 } else {
15499 /* Now read the physical PHY_ID from the chip and verify
15500 * that it is sane. If it doesn't look good, we fall back
15501 * to either the hard-coded table based PHY_ID and failing
15502 * that the value found in the eeprom area.
15503 */
15504 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15505 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15507 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15508 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15509 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15511 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15512 }
15514 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15515 tp->phy_id = hw_phy_id;
15516 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15517 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15518 else
15519 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15520 } else {
15521 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15522 /* Do nothing, phy ID already set up in
15523 * tg3_get_eeprom_hw_cfg().
15524 */
15525 } else {
15526 struct subsys_tbl_ent *p;
15528 /* No eeprom signature? Try the hardcoded
15529 * subsys device table.
15531 p = tg3_lookup_by_subsys(tp);
15532 if (p) {
15533 tp->phy_id = p->phy_id;
15534 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15535 /* For now we saw the IDs 0xbc050cd0,
15536 * 0xbc050f80 and 0xbc050c30 on devices
15537 * connected to an BCM4785 and there are
15538 * probably more. Just assume that the phy is
15539 * supported when it is connected to a SSB core
15540 * for now.
15541 */
15542 return -ENODEV;
15543 }
15544 }
15545 if (!tp->phy_id ||
15546 tp->phy_id == TG3_PHY_ID_BCM8002)
15547 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15551 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15552 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15553 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15554 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15555 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15556 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15557 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15558 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15559 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15560 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15562 tp->eee.supported = SUPPORTED_100baseT_Full |
15563 SUPPORTED_1000baseT_Full;
15564 tp->eee.advertised = ADVERTISED_100baseT_Full |
15565 ADVERTISED_1000baseT_Full;
15566 tp->eee.eee_enabled = 1;
15567 tp->eee.tx_lpi_enabled = 1;
15568 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15571 tg3_phy_init_link_config(tp);
15573 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15574 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15575 !tg3_flag(tp, ENABLE_APE) &&
15576 !tg3_flag(tp, ENABLE_ASF)) {
15577 u32 bmsr, dummy;
15579 tg3_readphy(tp, MII_BMSR, &bmsr);
15580 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15581 (bmsr & BMSR_LSTATUS))
15582 goto skip_phy_reset;
15584 err = tg3_phy_reset(tp);
15585 if (err)
15586 return err;
15588 tg3_phy_set_wirespeed(tp);
15590 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15591 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15592 tp->link_config.flowctrl);
15594 tg3_writephy(tp, MII_BMCR,
15595 BMCR_ANENABLE | BMCR_ANRESTART);
15596 }
15597 }
15599 skip_phy_reset:
15600 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15601 err = tg3_init_5401phy_dsp(tp);
15602 if (err)
15603 return err;
15605 err = tg3_init_5401phy_dsp(tp);
15606 }
15608 return err;
15609 }
15611 static void tg3_read_vpd(struct tg3 *tp)
15612 {
15613 u8 *vpd_data;
15614 unsigned int block_end, rosize, len;
15615 u32 vpdlen;
15616 int j, i;
15618 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15619 if (!vpd_data)
15620 goto out_no_vpd;
15622 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15623 if (i < 0)
15624 goto out_not_found;
15626 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15627 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15628 i += PCI_VPD_LRDT_TAG_SIZE;
15630 if (block_end > vpdlen)
15631 goto out_not_found;
15633 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15634 PCI_VPD_RO_KEYWORD_MFR_ID);
15635 if (j > 0) {
15636 len = pci_vpd_info_field_size(&vpd_data[j]);
15638 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15639 if (j + len > block_end || len != 4 ||
15640 memcmp(&vpd_data[j], "1028", 4))
15643 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15644 PCI_VPD_RO_KEYWORD_VENDOR0);
15645 if (j < 0)
15646 goto partno;
15648 len = pci_vpd_info_field_size(&vpd_data[j]);
15650 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15651 if (j + len > block_end)
15652 goto partno;
15654 if (len >= sizeof(tp->fw_ver))
15655 len = sizeof(tp->fw_ver) - 1;
15656 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15657 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15658 &vpd_data[j]);
15659 }
15661 partno:
15662 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15663 PCI_VPD_RO_KEYWORD_PARTNO);
15664 if (i < 0)
15665 goto out_not_found;
15667 len = pci_vpd_info_field_size(&vpd_data[i]);
15669 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15670 if (len > TG3_BPN_SIZE ||
15671 (len + i) > vpdlen)
15672 goto out_not_found;
15674 memcpy(tp->board_part_number, &vpd_data[i], len);
15676 out_not_found:
15677 kfree(vpd_data);
15678 if (tp->board_part_number[0])
15679 return;
15681 out_no_vpd:
15682 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15683 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15684 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15685 strcpy(tp->board_part_number, "BCM5717");
15686 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15687 strcpy(tp->board_part_number, "BCM5718");
15690 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15691 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15692 strcpy(tp->board_part_number, "BCM57780");
15693 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15694 strcpy(tp->board_part_number, "BCM57760");
15695 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15696 strcpy(tp->board_part_number, "BCM57790");
15697 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15698 strcpy(tp->board_part_number, "BCM57788");
15701 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15702 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15703 strcpy(tp->board_part_number, "BCM57761");
15704 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15705 strcpy(tp->board_part_number, "BCM57765");
15706 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15707 strcpy(tp->board_part_number, "BCM57781");
15708 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15709 strcpy(tp->board_part_number, "BCM57785");
15710 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15711 strcpy(tp->board_part_number, "BCM57791");
15712 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15713 strcpy(tp->board_part_number, "BCM57795");
15716 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15717 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15718 strcpy(tp->board_part_number, "BCM57762");
15719 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15720 strcpy(tp->board_part_number, "BCM57766");
15721 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15722 strcpy(tp->board_part_number, "BCM57782");
15723 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15724 strcpy(tp->board_part_number, "BCM57786");
15727 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15728 strcpy(tp->board_part_number, "BCM95906");
15731 strcpy(tp->board_part_number, "none");
15735 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15736 {
15737 u32 val;
15739 if (tg3_nvram_read(tp, offset, &val) ||
15740 (val & 0xfc000000) != 0x0c000000 ||
15741 tg3_nvram_read(tp, offset + 4, &val) ||
15742 (val & 0xe0000000) != 0)
15743 return 0;
15745 return 1;
15746 }
15748 static void tg3_read_bc_ver(struct tg3 *tp)
15750 u32 val, offset, start, ver_offset;
15751 int i, dst_off;
15752 bool newver = false;
15754 if (tg3_nvram_read(tp, 0xc, &offset) ||
15755 tg3_nvram_read(tp, 0x4, &start))
15756 return;
15758 offset = tg3_nvram_logical_addr(tp, offset);
15760 if (tg3_nvram_read(tp, offset, &val))
15761 return;
15763 if ((val & 0xfc000000) == 0x0c000000) {
15764 if (tg3_nvram_read(tp, offset + 4, &val))
15765 return;
15767 if (val == 0)
15768 newver = true;
15769 }
15771 dst_off = strlen(tp->fw_ver);
15773 if (newver) {
15774 if (TG3_VER_SIZE - dst_off < 16 ||
15775 tg3_nvram_read(tp, offset + 8, &ver_offset))
15776 return;
15778 offset = offset + ver_offset - start;
15779 for (i = 0; i < 16; i += 4) {
15780 __be32 v;
15781 if (tg3_nvram_read_be32(tp, offset + i, &v))
15782 return;
15784 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15785 }
15786 } else {
15787 u32 major, minor;
15789 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15790 return;
15792 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15793 TG3_NVM_BCVER_MAJSFT;
15794 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15795 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15796 "v%d.%02d", major, minor);
15800 static void tg3_read_hwsb_ver(struct tg3 *tp)
15802 u32 val, major, minor;
15804 /* Use native endian representation */
15805 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15806 return;
15808 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15809 TG3_NVM_HWSB_CFG1_MAJSFT;
15810 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15811 TG3_NVM_HWSB_CFG1_MINSFT;
15813 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15816 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15818 u32 offset, major, minor, build;
15820 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15822 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15823 return;
15825 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15826 case TG3_EEPROM_SB_REVISION_0:
15827 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15829 case TG3_EEPROM_SB_REVISION_2:
15830 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15832 case TG3_EEPROM_SB_REVISION_3:
15833 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15835 case TG3_EEPROM_SB_REVISION_4:
15836 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15838 case TG3_EEPROM_SB_REVISION_5:
15839 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15841 case TG3_EEPROM_SB_REVISION_6:
15842 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15843 break;
15844 default:
15845 return;
15846 }
15848 if (tg3_nvram_read(tp, offset, &val))
15849 return;
15851 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15852 TG3_EEPROM_SB_EDH_BLD_SHFT;
15853 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15854 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15855 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15857 if (minor > 99 || build > 26)
15858 return;
15860 offset = strlen(tp->fw_ver);
15861 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15862 " v%d.%02d", major, minor);
15864 if (build > 0) {
15865 offset = strlen(tp->fw_ver);
15866 if (offset < TG3_VER_SIZE - 1)
15867 tp->fw_ver[offset] = 'a' + build - 1;
15868 }
15869 }
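/* The build number is rendered as a trailing letter: for example, a
 * self-boot image with major 5, minor 3, build 2 yields "sb v5.03b"
 * ('a' + 2 - 1 == 'b'); build 0 appends nothing.
 */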
15871 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15873 u32 val, offset, start;
15874 int i, vlen;
15876 for (offset = TG3_NVM_DIR_START;
15877 offset < TG3_NVM_DIR_END;
15878 offset += TG3_NVM_DIRENT_SIZE) {
15879 if (tg3_nvram_read(tp, offset, &val))
15880 return;
15882 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15883 break;
15884 }
15886 if (offset == TG3_NVM_DIR_END)
15887 return;
15889 if (!tg3_flag(tp, 5705_PLUS))
15890 start = 0x08000000;
15891 else if (tg3_nvram_read(tp, offset - 4, &start))
15892 return;
15894 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15895 !tg3_fw_img_is_valid(tp, offset) ||
15896 tg3_nvram_read(tp, offset + 8, &val))
15897 return;
15899 offset += val - start;
15901 vlen = strlen(tp->fw_ver);
15903 tp->fw_ver[vlen++] = ',';
15904 tp->fw_ver[vlen++] = ' ';
15906 for (i = 0; i < 4; i++) {
15907 __be32 v;
15908 if (tg3_nvram_read_be32(tp, offset, &v))
15909 return;
15911 offset += sizeof(v);
15913 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15914 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15915 break;
15916 }
15918 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15919 vlen += sizeof(v);
15920 }
15921 }
15923 static void tg3_probe_ncsi(struct tg3 *tp)
15924 {
15925 u32 apedata;
15927 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15928 if (apedata != APE_SEG_SIG_MAGIC)
15929 return;
15931 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15932 if (!(apedata & APE_FW_STATUS_READY))
15933 return;
15935 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15936 tg3_flag_set(tp, APE_HAS_NCSI);
15939 static void tg3_read_dash_ver(struct tg3 *tp)
15940 {
15941 int vlen;
15942 u32 apedata;
15943 char *fwtype;
15945 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15947 if (tg3_flag(tp, APE_HAS_NCSI))
15948 fwtype = "NCSI";
15949 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15950 fwtype = "SMASH";
15951 else
15952 fwtype = "DASH";
15954 vlen = strlen(tp->fw_ver);
15956 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15957 fwtype,
15958 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15959 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15960 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15961 (apedata & APE_FW_VERSION_BLDMSK));
15964 static void tg3_read_otp_ver(struct tg3 *tp)
15965 {
15966 u32 val, val2;
15968 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15969 return;
15971 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15972 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15973 TG3_OTP_MAGIC0_VALID(val)) {
15974 u64 val64 = (u64) val << 32 | val2;
15975 u32 ver = 0;
15976 int i, vlen;
15978 for (i = 0; i < 7; i++) {
15979 if ((val64 & 0xff) == 0)
15980 break;
15981 ver = val64 & 0xff;
15982 val64 >>= 8;
15983 }
15984 vlen = strlen(tp->fw_ver);
15985 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15989 static void tg3_read_fw_ver(struct tg3 *tp)
15990 {
15991 u32 val;
15992 bool vpd_vers = false;
15994 if (tp->fw_ver[0] != 0)
15995 vpd_vers = true;
15997 if (tg3_flag(tp, NO_NVRAM)) {
15998 strcat(tp->fw_ver, "sb");
15999 tg3_read_otp_ver(tp);
16000 return;
16001 }
16003 if (tg3_nvram_read(tp, 0, &val))
16004 return;
16006 if (val == TG3_EEPROM_MAGIC)
16007 tg3_read_bc_ver(tp);
16008 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16009 tg3_read_sb_ver(tp, val);
16010 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16011 tg3_read_hwsb_ver(tp);
16013 if (tg3_flag(tp, ENABLE_ASF)) {
16014 if (tg3_flag(tp, ENABLE_APE)) {
16015 tg3_probe_ncsi(tp);
16016 if (!vpd_vers)
16017 tg3_read_dash_ver(tp);
16018 } else if (!vpd_vers) {
16019 tg3_read_mgmtfw_ver(tp);
16023 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16026 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16028 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16029 return TG3_RX_RET_MAX_SIZE_5717;
16030 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16031 return TG3_RX_RET_MAX_SIZE_5700;
16033 return TG3_RX_RET_MAX_SIZE_5705;
16036 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16037 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16038 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16039 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16040 { },
16041 };
16043 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16045 struct pci_dev *peer;
16046 unsigned int func, devnr = tp->pdev->devfn & ~7;
16048 for (func = 0; func < 8; func++) {
16049 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16050 if (peer && peer != tp->pdev)
16051 break;
16052 pci_dev_put(peer);
16053 }
16054 /* 5704 can be configured in single-port mode, set peer to
16055 * tp->pdev in that case.
16056 */
16057 if (!peer) {
16058 peer = tp->pdev;
16059 return peer;
16060 }
16062 /*
16063 * We don't need to keep the refcount elevated; there's no way
16064 * to remove one half of this device without removing the other
16065 */
16066 pci_dev_put(peer);
16068 return peer;
16069 }
16071 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16073 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16074 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16075 u32 reg;
16077 /* All devices that use the alternate
16078 * ASIC REV location have a CPMU.
16079 */
16080 tg3_flag_set(tp, CPMU_PRESENT);
16082 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16083 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16084 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16086 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16087 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16088 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16089 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16090 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16093 reg = TG3PCI_GEN2_PRODID_ASICREV;
16094 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16095 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16099 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16100 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16101 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16103 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16104 reg = TG3PCI_GEN15_PRODID_ASICREV;
16106 reg = TG3PCI_PRODID_ASICREV;
16108 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16111 /* Wrong chip ID in 5752 A0. This code can be removed later
16112 * as A0 is not in production.
16113 */
16114 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16115 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16117 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16118 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16120 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16121 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16122 tg3_asic_rev(tp) == ASIC_REV_5720)
16123 tg3_flag_set(tp, 5717_PLUS);
16125 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16126 tg3_asic_rev(tp) == ASIC_REV_57766)
16127 tg3_flag_set(tp, 57765_CLASS);
16129 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16130 tg3_asic_rev(tp) == ASIC_REV_5762)
16131 tg3_flag_set(tp, 57765_PLUS);
16133 /* Intentionally exclude ASIC_REV_5906 */
16134 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16135 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16136 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16137 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16138 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16139 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16140 tg3_flag(tp, 57765_PLUS))
16141 tg3_flag_set(tp, 5755_PLUS);
16143 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16144 tg3_asic_rev(tp) == ASIC_REV_5714)
16145 tg3_flag_set(tp, 5780_CLASS);
16147 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16148 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16149 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16150 tg3_flag(tp, 5755_PLUS) ||
16151 tg3_flag(tp, 5780_CLASS))
16152 tg3_flag_set(tp, 5750_PLUS);
16154 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16155 tg3_flag(tp, 5750_PLUS))
16156 tg3_flag_set(tp, 5705_PLUS);
16159 static bool tg3_10_100_only_device(struct tg3 *tp,
16160 const struct pci_device_id *ent)
16162 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16164 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16165 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16166 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16167 return true;
16169 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16170 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16171 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16172 return true;
16173 } else {
16174 return true;
16175 }
16176 }
16178 return false;
16179 }
16181 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16184 u32 pci_state_reg, grc_misc_cfg;
16189 /* Force memory write invalidate off. If we leave it on,
16190 * then on 5700_BX chips we have to enable a workaround.
16191 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16192 * to match the cacheline size. The Broadcom driver has this
16193 * workaround but turns MWI off all the time and thus never uses
16194 * it. This seems to suggest that the workaround is insufficient.
16195 */
16196 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16197 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16198 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16200 /* Important! -- Make sure register accesses are byteswapped
16201 * correctly. Also, for those chips that require it, make
16202 * sure that indirect register accesses are enabled before
16203 * the first operation.
16204 */
16205 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16207 tp->misc_host_ctrl |= (misc_ctrl_reg &
16208 MISC_HOST_CTRL_CHIPREV);
16209 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16210 tp->misc_host_ctrl);
16212 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16214 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16215 * we need to disable memory and use config. cycles
16216 * only to access all registers. The 5702/03 chips
16217 * can mistakenly decode the special cycles from the
16218 * ICH chipsets as memory write cycles, causing corruption
16219 * of register and memory space. Only certain ICH bridges
16220 * will drive special cycles with non-zero data during the
16221 * address phase which can fall within the 5703's address
16222 * range. This is not an ICH bug as the PCI spec allows
16223 * non-zero address during special cycles. However, only
16224 * these ICH bridges are known to drive non-zero addresses
16225 * during special cycles.
16227 * Since special cycles do not cross PCI bridges, we only
16228 * enable this workaround if the 5703 is on the secondary
16229 * bus of these ICH bridges.
16230 */
16231 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16232 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16233 static struct tg3_dev_id {
16237 } ich_chipsets[] = {
16238 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16240 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16242 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16244 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16248 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16249 struct pci_dev *bridge = NULL;
16251 while (pci_id->vendor != 0) {
16252 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16258 if (pci_id->rev != PCI_ANY_ID) {
16259 if (bridge->revision > pci_id->rev)
16262 if (bridge->subordinate &&
16263 (bridge->subordinate->number ==
16264 tp->pdev->bus->number)) {
16265 tg3_flag_set(tp, ICH_WORKAROUND);
16266 pci_dev_put(bridge);
16272 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16273 static struct tg3_dev_id {
16276 } bridge_chipsets[] = {
16277 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16278 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16281 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16282 struct pci_dev *bridge = NULL;
16284 while (pci_id->vendor != 0) {
16285 bridge = pci_get_device(pci_id->vendor,
16292 if (bridge->subordinate &&
16293 (bridge->subordinate->number <=
16294 tp->pdev->bus->number) &&
16295 (bridge->subordinate->busn_res.end >=
16296 tp->pdev->bus->number)) {
16297 tg3_flag_set(tp, 5701_DMA_BUG);
16298 pci_dev_put(bridge);
16304 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16305 * DMA addresses > 40-bit. This bridge may have other additional
16306 * 57xx devices behind it in some 4-port NIC designs for example.
16307 * Any tg3 device found behind the bridge will also need the 40-bit
16308 * DMA workaround.
16309 */
16310 if (tg3_flag(tp, 5780_CLASS)) {
16311 tg3_flag_set(tp, 40BIT_DMA_BUG);
16312 tp->msi_cap = tp->pdev->msi_cap;
16314 struct pci_dev *bridge = NULL;
16317 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16318 PCI_DEVICE_ID_SERVERWORKS_EPB,
16320 if (bridge && bridge->subordinate &&
16321 (bridge->subordinate->number <=
16322 tp->pdev->bus->number) &&
16323 (bridge->subordinate->busn_res.end >=
16324 tp->pdev->bus->number)) {
16325 tg3_flag_set(tp, 40BIT_DMA_BUG);
16326 pci_dev_put(bridge);
16332 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16333 tg3_asic_rev(tp) == ASIC_REV_5714)
16334 tp->pdev_peer = tg3_find_peer(tp);
16336 /* Determine TSO capabilities */
16337 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16338 ; /* Do nothing. HW bug. */
16339 else if (tg3_flag(tp, 57765_PLUS))
16340 tg3_flag_set(tp, HW_TSO_3);
16341 else if (tg3_flag(tp, 5755_PLUS) ||
16342 tg3_asic_rev(tp) == ASIC_REV_5906)
16343 tg3_flag_set(tp, HW_TSO_2);
16344 else if (tg3_flag(tp, 5750_PLUS)) {
16345 tg3_flag_set(tp, HW_TSO_1);
16346 tg3_flag_set(tp, TSO_BUG);
16347 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16348 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16349 tg3_flag_clear(tp, TSO_BUG);
16350 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16351 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16352 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16353 tg3_flag_set(tp, FW_TSO);
16354 tg3_flag_set(tp, TSO_BUG);
16355 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16356 tp->fw_needed = FIRMWARE_TG3TSO5;
16358 tp->fw_needed = FIRMWARE_TG3TSO;
16361 /* Selectively allow TSO based on operating conditions */
16362 if (tg3_flag(tp, HW_TSO_1) ||
16363 tg3_flag(tp, HW_TSO_2) ||
16364 tg3_flag(tp, HW_TSO_3) ||
16365 tg3_flag(tp, FW_TSO)) {
16366 /* For firmware TSO, assume ASF is disabled.
16367 * We'll disable TSO later if we discover ASF
16368 * is enabled in tg3_get_eeprom_hw_cfg().
16369 */
16370 tg3_flag_set(tp, TSO_CAPABLE);
16372 tg3_flag_clear(tp, TSO_CAPABLE);
16373 tg3_flag_clear(tp, TSO_BUG);
16374 tp->fw_needed = NULL;
16377 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16378 tp->fw_needed = FIRMWARE_TG3;
16380 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16381 tp->fw_needed = FIRMWARE_TG357766;
16385 if (tg3_flag(tp, 5750_PLUS)) {
16386 tg3_flag_set(tp, SUPPORT_MSI);
16387 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16388 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16389 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16390 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16391 tp->pdev_peer == tp->pdev))
16392 tg3_flag_clear(tp, SUPPORT_MSI);
16394 if (tg3_flag(tp, 5755_PLUS) ||
16395 tg3_asic_rev(tp) == ASIC_REV_5906) {
16396 tg3_flag_set(tp, 1SHOT_MSI);
16399 if (tg3_flag(tp, 57765_PLUS)) {
16400 tg3_flag_set(tp, SUPPORT_MSIX);
16401 tp->irq_max = TG3_IRQ_MAX_VECS;
16407 if (tp->irq_max > 1) {
16408 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16409 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16411 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16412 tg3_asic_rev(tp) == ASIC_REV_5720)
16413 tp->txq_max = tp->irq_max - 1;
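/* One vector is held back here because, when RSS/MSI-X is in use,
 * vector 0 services only link change interrupts (see the mailbox
 * setup comment in tg3_init_one() below), leaving irq_max - 1
 * vectors for the tx queues.
 */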
16416 if (tg3_flag(tp, 5755_PLUS) ||
16417 tg3_asic_rev(tp) == ASIC_REV_5906)
16418 tg3_flag_set(tp, SHORT_DMA_BUG);
16420 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16421 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16423 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16424 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16425 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16426 tg3_asic_rev(tp) == ASIC_REV_5762)
16427 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16429 if (tg3_flag(tp, 57765_PLUS) &&
16430 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16431 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16433 if (!tg3_flag(tp, 5705_PLUS) ||
16434 tg3_flag(tp, 5780_CLASS) ||
16435 tg3_flag(tp, USE_JUMBO_BDFLAG))
16436 tg3_flag_set(tp, JUMBO_CAPABLE);
16438 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16441 if (pci_is_pcie(tp->pdev)) {
16444 tg3_flag_set(tp, PCI_EXPRESS);
16446 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16447 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16448 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16449 tg3_flag_clear(tp, HW_TSO_2);
16450 tg3_flag_clear(tp, TSO_CAPABLE);
16452 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16453 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16454 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16455 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16456 tg3_flag_set(tp, CLKREQ_BUG);
16457 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16458 tg3_flag_set(tp, L1PLLPD_EN);
16460 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16461 /* BCM5785 devices are effectively PCIe devices, and should
16462 * follow PCIe codepaths, but do not have a PCIe capabilities
16463 * section.
16464 */
16465 tg3_flag_set(tp, PCI_EXPRESS);
16466 } else if (!tg3_flag(tp, 5705_PLUS) ||
16467 tg3_flag(tp, 5780_CLASS)) {
16468 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16469 if (!tp->pcix_cap) {
16470 dev_err(&tp->pdev->dev,
16471 "Cannot find PCI-X capability, aborting\n");
16475 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16476 tg3_flag_set(tp, PCIX_MODE);
16479 /* If we have an AMD 762 or VIA K8T800 chipset, write
16480 * reordering to the mailbox registers done by the host
16481 * controller can cause major troubles. We read back from
16482 * every mailbox register write to force the writes to be
16483 * posted to the chip in order.
16484 */
16485 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16486 !tg3_flag(tp, PCI_EXPRESS))
16487 tg3_flag_set(tp, MBOX_WRITE_REORDER);
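/* A minimal sketch (not part of the driver) of the read-back flush
 * that this flag selects; the real helper wired up below is
 * tg3_write_flush_reg32():
 *
 *	static void example_mbox_write(struct tg3 *tp, u32 off, u32 val)
 *	{
 *		writel(val, tp->regs + off);
 *		readl(tp->regs + off);	// dummy read forces the write out
 *	}
 *
 * A PCI read cannot complete until prior posted writes from the same
 * master have been flushed, so the host bridge can no longer reorder
 * the mailbox writes.
 */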
16489 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16490 &tp->pci_cacheline_sz);
16491 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16492 &tp->pci_lat_timer);
16493 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16494 tp->pci_lat_timer < 64) {
16495 tp->pci_lat_timer = 64;
16496 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16497 tp->pci_lat_timer);
16500 /* Important! -- It is critical that the PCI-X hw workaround
16501 * situation is decided before the first MMIO register access.
16502 */
16503 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16504 /* 5700 BX chips need to have their TX producer index
16505 * mailboxes written twice to work around a bug.
16506 */
16507 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16509 /* If we are in PCI-X mode, enable register write workaround.
16511 * The workaround is to use indirect register accesses
16512 * for all chip writes not to mailbox registers.
16513 */
16514 if (tg3_flag(tp, PCIX_MODE)) {
16517 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16519 /* The chip can have its power management PCI config
16520 * space registers clobbered due to this bug.
16521 * So explicitly force the chip into D0 here.
16522 */
16523 pci_read_config_dword(tp->pdev,
16524 tp->pdev->pm_cap + PCI_PM_CTRL,
16526 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16527 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16528 pci_write_config_dword(tp->pdev,
16529 tp->pdev->pm_cap + PCI_PM_CTRL,
16532 /* Also, force SERR#/PERR# in PCI command. */
16533 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16534 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16535 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16539 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16540 tg3_flag_set(tp, PCI_HIGH_SPEED);
16541 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16542 tg3_flag_set(tp, PCI_32BIT);
16544 /* Chip-specific fixup from Broadcom driver */
16545 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16546 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16547 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16548 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16551 /* Default fast path register access methods */
16552 tp->read32 = tg3_read32;
16553 tp->write32 = tg3_write32;
16554 tp->read32_mbox = tg3_read32;
16555 tp->write32_mbox = tg3_write32;
16556 tp->write32_tx_mbox = tg3_write32;
16557 tp->write32_rx_mbox = tg3_write32;
16559 /* Various workaround register access methods */
16560 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16561 tp->write32 = tg3_write_indirect_reg32;
16562 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16563 (tg3_flag(tp, PCI_EXPRESS) &&
16564 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16565 /*
16566 * Back-to-back register writes can cause problems on these
16567 * chips; the workaround is to read back all reg writes
16568 * except those to mailbox regs.
16570 * See tg3_write_indirect_reg32().
16571 */
16572 tp->write32 = tg3_write_flush_reg32;
16575 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16576 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16577 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16578 tp->write32_rx_mbox = tg3_write_flush_reg32;
16581 if (tg3_flag(tp, ICH_WORKAROUND)) {
16582 tp->read32 = tg3_read_indirect_reg32;
16583 tp->write32 = tg3_write_indirect_reg32;
16584 tp->read32_mbox = tg3_read_indirect_mbox;
16585 tp->write32_mbox = tg3_write_indirect_mbox;
16586 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16587 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16592 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16593 pci_cmd &= ~PCI_COMMAND_MEMORY;
16594 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16596 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16597 tp->read32_mbox = tg3_read32_mbox_5906;
16598 tp->write32_mbox = tg3_write32_mbox_5906;
16599 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16600 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16603 if (tp->write32 == tg3_write_indirect_reg32 ||
16604 (tg3_flag(tp, PCIX_MODE) &&
16605 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16606 tg3_asic_rev(tp) == ASIC_REV_5701)))
16607 tg3_flag_set(tp, SRAM_USE_CONFIG);
16609 /* The memory arbiter has to be enabled in order for SRAM accesses
16610 * to succeed. Normally on powerup the tg3 chip firmware will make
16611 * sure it is enabled, but other entities such as system netboot
16612 * code might disable it.
16613 */
16614 val = tr32(MEMARB_MODE);
16615 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16617 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16618 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16619 tg3_flag(tp, 5780_CLASS)) {
16620 if (tg3_flag(tp, PCIX_MODE)) {
16621 pci_read_config_dword(tp->pdev,
16622 tp->pcix_cap + PCI_X_STATUS,
16624 tp->pci_fn = val & 0x7;
16626 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16627 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16628 tg3_asic_rev(tp) == ASIC_REV_5720) {
16629 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16630 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16631 val = tr32(TG3_CPMU_STATUS);
16633 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16634 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16636 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16637 TG3_CPMU_STATUS_FSHFT_5719;
16640 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16641 tp->write32_tx_mbox = tg3_write_flush_reg32;
16642 tp->write32_rx_mbox = tg3_write_flush_reg32;
16645 /* Get eeprom hw config before calling tg3_set_power_state().
16646 * In particular, the TG3_FLAG_IS_NIC flag must be
16647 * determined before calling tg3_set_power_state() so that
16648 * we know whether or not to switch out of Vaux power.
16649 * When the flag is set, it means that GPIO1 is used for eeprom
16650 * write protect and also implies that it is a LOM where GPIOs
16651 * are not used to switch power.
16652 */
16653 tg3_get_eeprom_hw_cfg(tp);
16655 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16656 tg3_flag_clear(tp, TSO_CAPABLE);
16657 tg3_flag_clear(tp, TSO_BUG);
16658 tp->fw_needed = NULL;
16661 if (tg3_flag(tp, ENABLE_APE)) {
16662 /* Allow reads and writes to the
16663 * APE register and memory space.
16664 */
16665 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16666 PCISTATE_ALLOW_APE_SHMEM_WR |
16667 PCISTATE_ALLOW_APE_PSPACE_WR;
16668 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16671 tg3_ape_lock_init(tp);
16672 tp->ape_hb_interval =
16673 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16676 /* Set up tp->grc_local_ctrl before calling
16677 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16678 * will bring 5700's external PHY out of reset.
16679 * It is also used as eeprom write protect on LOMs.
16680 */
16681 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16682 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16683 tg3_flag(tp, EEPROM_WRITE_PROT))
16684 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16685 GRC_LCLCTRL_GPIO_OUTPUT1);
16686 /* Unused GPIO3 must be driven as output on 5752 because there
16687 * are no pull-up resistors on unused GPIO pins.
16688 */
16689 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16690 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16692 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16693 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16694 tg3_flag(tp, 57765_CLASS))
16695 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16697 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16698 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16699 /* Turn off the debug UART. */
16700 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16701 if (tg3_flag(tp, IS_NIC))
16702 /* Keep VMain power. */
16703 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16704 GRC_LCLCTRL_GPIO_OUTPUT0;
16707 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16708 tp->grc_local_ctrl |=
16709 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16711 /* Switch out of Vaux if it is a NIC */
16712 tg3_pwrsrc_switch_to_vmain(tp);
16714 /* Derive initial jumbo mode from MTU assigned in
16715 * ether_setup() via the alloc_etherdev() call
16716 */
16717 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16718 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16720 /* Determine WakeOnLan speed to use. */
16721 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16722 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16723 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16724 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16725 tg3_flag_clear(tp, WOL_SPEED_100MB);
16727 tg3_flag_set(tp, WOL_SPEED_100MB);
16730 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16731 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16733 /* A few boards don't want Ethernet@WireSpeed phy feature */
16734 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16735 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16736 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16737 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16738 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16739 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16740 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16742 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16743 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16744 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16745 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16746 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16748 if (tg3_flag(tp, 5705_PLUS) &&
16749 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16750 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16751 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16752 !tg3_flag(tp, 57765_PLUS)) {
16753 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16754 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16755 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16756 tg3_asic_rev(tp) == ASIC_REV_5761) {
16757 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16758 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16759 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16760 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16761 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16763 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16766 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16767 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16768 tp->phy_otp = tg3_read_otp_phycfg(tp);
16769 if (tp->phy_otp == 0)
16770 tp->phy_otp = TG3_OTP_DEFAULT;
16773 if (tg3_flag(tp, CPMU_PRESENT))
16774 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16776 tp->mi_mode = MAC_MI_MODE_BASE;
16778 tp->coalesce_mode = 0;
16779 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16780 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16781 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16783 /* Set these bits to enable statistics workaround. */
16784 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16785 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16786 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16787 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16788 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16789 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16792 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16793 tg3_asic_rev(tp) == ASIC_REV_57780)
16794 tg3_flag_set(tp, USE_PHYLIB);
16796 err = tg3_mdio_init(tp);
16800 /* Initialize data/descriptor byte/word swapping. */
16801 val = tr32(GRC_MODE);
16802 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16803 tg3_asic_rev(tp) == ASIC_REV_5762)
16804 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16805 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16806 GRC_MODE_B2HRX_ENABLE |
16807 GRC_MODE_HTX2B_ENABLE |
16808 GRC_MODE_HOST_STACKUP);
16810 val &= GRC_MODE_HOST_STACKUP;
16812 tw32(GRC_MODE, val | tp->grc_mode);
16814 tg3_switch_clocks(tp);
16816 /* Clear this out for sanity. */
16817 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16819 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16820 tw32(TG3PCI_REG_BASE_ADDR, 0);
16822 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16824 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16825 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16826 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16827 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16828 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16829 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16830 void __iomem *sram_base;
16832 /* Write some dummy words into the SRAM status block
16833 * area, see if it reads back correctly. If the return
16834 * value is bad, force enable the PCIX workaround.
16835 */
16836 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16838 writel(0x00000000, sram_base);
16839 writel(0x00000000, sram_base + 4);
16840 writel(0xffffffff, sram_base + 4);
16841 if (readl(sram_base) != 0x00000000)
16842 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16847 tg3_nvram_init(tp);
16849 /* If the device has an NVRAM, no need to load patch firmware */
16850 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16851 !tg3_flag(tp, NO_NVRAM))
16852 tp->fw_needed = NULL;
16854 grc_misc_cfg = tr32(GRC_MISC_CFG);
16855 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16857 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16858 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16859 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16860 tg3_flag_set(tp, IS_5788);
16862 if (!tg3_flag(tp, IS_5788) &&
16863 tg3_asic_rev(tp) != ASIC_REV_5700)
16864 tg3_flag_set(tp, TAGGED_STATUS);
16865 if (tg3_flag(tp, TAGGED_STATUS)) {
16866 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16867 HOSTCC_MODE_CLRTICK_TXBD);
16869 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16870 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16871 tp->misc_host_ctrl);
16874 /* Preserve the APE MAC_MODE bits */
16875 if (tg3_flag(tp, ENABLE_APE))
16876 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16880 if (tg3_10_100_only_device(tp, ent))
16881 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16883 err = tg3_phy_probe(tp);
16885 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16886 /* ... but do not return immediately ... */
16891 tg3_read_fw_ver(tp);
16893 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16894 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16896 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16897 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16899 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16902 /* 5700 {AX,BX} chips have a broken status block link
16903 * change bit implementation, so we must use the
16904 * status register in those cases.
16905 */
16906 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16907 tg3_flag_set(tp, USE_LINKCHG_REG);
16909 tg3_flag_clear(tp, USE_LINKCHG_REG);
16911 /* The led_ctrl is set during tg3_phy_probe; here we might
16912 * have to force the link status polling mechanism based
16913 * upon subsystem IDs.
16914 */
16915 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16916 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16917 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16918 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16919 tg3_flag_set(tp, USE_LINKCHG_REG);
16922 /* For all SERDES we poll the MAC status register. */
16923 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16924 tg3_flag_set(tp, POLL_SERDES);
16926 tg3_flag_clear(tp, POLL_SERDES);
16928 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16929 tg3_flag_set(tp, POLL_CPMU_LINK);
16931 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16932 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16933 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16934 tg3_flag(tp, PCIX_MODE)) {
16935 tp->rx_offset = NET_SKB_PAD;
16936 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16937 tp->rx_copy_thresh = ~(u16)0;
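/* A copy threshold of 0xffff exceeds any possible frame size, so
 * every packet is copied into a freshly allocated (and therefore
 * aligned) skb; without NET_IP_ALIGN in rx_offset this avoids
 * unaligned IP header accesses on such architectures.
 */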
16941 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16942 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16943 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16945 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16947 /* Increment the rx prod index on the rx std ring by at most
16948 * 8 for these chips to work around hw errata.
16949 */
16950 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16951 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16952 tg3_asic_rev(tp) == ASIC_REV_5755)
16953 tp->rx_std_max_post = 8;
16955 if (tg3_flag(tp, ASPM_WORKAROUND))
16956 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16957 PCIE_PWR_MGMT_L1_THRESH_MSK;
16962 #ifdef CONFIG_SPARC
16963 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16965 struct net_device *dev = tp->dev;
16966 struct pci_dev *pdev = tp->pdev;
16967 struct device_node *dp = pci_device_to_OF_node(pdev);
16968 const unsigned char *addr;
16971 addr = of_get_property(dp, "local-mac-address", &len);
16972 if (addr && len == ETH_ALEN) {
16973 memcpy(dev->dev_addr, addr, ETH_ALEN);
16979 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16981 struct net_device *dev = tp->dev;
16983 memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16988 static int tg3_get_device_address(struct tg3 *tp)
16990 struct net_device *dev = tp->dev;
16991 u32 hi, lo, mac_offset;
16995 #ifdef CONFIG_SPARC
16996 if (!tg3_get_macaddr_sparc(tp))
17000 if (tg3_flag(tp, IS_SSB_CORE)) {
17001 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17002 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17007 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17008 tg3_flag(tp, 5780_CLASS)) {
17009 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17011 if (tg3_nvram_lock(tp))
17012 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17014 tg3_nvram_unlock(tp);
17015 } else if (tg3_flag(tp, 5717_PLUS)) {
17016 if (tp->pci_fn & 1)
17018 if (tp->pci_fn > 1)
17019 mac_offset += 0x18c;
17020 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17023 /* First try to get it from MAC address mailbox. */
17024 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
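/* 0x484b is ASCII "HK"; the check below treats it as a validity
 * signature left by the bootcode in the upper half of the word.
 */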
17025 if ((hi >> 16) == 0x484b) {
17026 dev->dev_addr[0] = (hi >> 8) & 0xff;
17027 dev->dev_addr[1] = (hi >> 0) & 0xff;
17029 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17030 dev->dev_addr[2] = (lo >> 24) & 0xff;
17031 dev->dev_addr[3] = (lo >> 16) & 0xff;
17032 dev->dev_addr[4] = (lo >> 8) & 0xff;
17033 dev->dev_addr[5] = (lo >> 0) & 0xff;
17035 /* Some old bootcode may report a 0 MAC address in SRAM */
17036 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17039 /* Next, try NVRAM. */
17040 if (!tg3_flag(tp, NO_NVRAM) &&
17041 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17042 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17043 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17044 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17046 /* Finally just fetch it out of the MAC control regs. */
17048 hi = tr32(MAC_ADDR_0_HIGH);
17049 lo = tr32(MAC_ADDR_0_LOW);
17051 dev->dev_addr[5] = lo & 0xff;
17052 dev->dev_addr[4] = (lo >> 8) & 0xff;
17053 dev->dev_addr[3] = (lo >> 16) & 0xff;
17054 dev->dev_addr[2] = (lo >> 24) & 0xff;
17055 dev->dev_addr[1] = hi & 0xff;
17056 dev->dev_addr[0] = (hi >> 8) & 0xff;
17060 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17061 #ifdef CONFIG_SPARC
17062 if (!tg3_get_default_macaddr_sparc(tp))
17070 #define BOUNDARY_SINGLE_CACHELINE 1
17071 #define BOUNDARY_MULTI_CACHELINE 2
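/* goal is one of the BOUNDARY_* values above, or 0 when no DMA
 * boundary restriction is wanted; note the 57765_PLUS case below,
 * where a zero goal disables cache alignment outright.
 */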
17073 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17075 int cacheline_size;
17079 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17081 cacheline_size = 1024;
17083 cacheline_size = (int) byte * 4;
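/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence the
 * multiply by four; a zero register (never programmed by firmware)
 * falls back to the 1024-byte assumption above.
 */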
17085 /* On 5703 and later chips, the boundary bits have no
17086 * effect.
17087 */
17088 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17089 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17090 !tg3_flag(tp, PCI_EXPRESS))
17093 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17094 goal = BOUNDARY_MULTI_CACHELINE;
17096 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17097 goal = BOUNDARY_SINGLE_CACHELINE;
17103 if (tg3_flag(tp, 57765_PLUS)) {
17104 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17111 /* PCI controllers on most RISC systems tend to disconnect
17112 * when a device tries to burst across a cache-line boundary.
17113 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17115 * Unfortunately, for PCI-E there are only limited
17116 * write-side controls for this, and thus for reads
17117 * we will still get the disconnects. We'll also waste
17118 * these PCI cycles for both read and write for chips
17119 * other than 5700 and 5701 which do not implement the
17120 * boundary bits.
17121 */
17122 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17123 switch (cacheline_size) {
17128 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17129 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17130 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17132 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17133 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17138 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17139 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17143 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17144 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17147 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17148 switch (cacheline_size) {
17152 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17153 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17154 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17160 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17161 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17165 switch (cacheline_size) {
17167 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17168 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17169 DMA_RWCTRL_WRITE_BNDRY_16);
17174 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17175 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17176 DMA_RWCTRL_WRITE_BNDRY_32);
17181 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17182 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17183 DMA_RWCTRL_WRITE_BNDRY_64);
17188 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17189 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17190 DMA_RWCTRL_WRITE_BNDRY_128);
17195 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17196 DMA_RWCTRL_WRITE_BNDRY_256);
17199 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17200 DMA_RWCTRL_WRITE_BNDRY_512);
17204 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17205 DMA_RWCTRL_WRITE_BNDRY_1024);
17214 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17215 int size, bool to_device)
17217 struct tg3_internal_buffer_desc test_desc;
17218 u32 sram_dma_descs;
17221 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17223 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17224 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17225 tw32(RDMAC_STATUS, 0);
17226 tw32(WDMAC_STATUS, 0);
17228 tw32(BUFMGR_MODE, 0);
17229 tw32(FTQ_RESET, 0);
17231 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17232 test_desc.addr_lo = buf_dma & 0xffffffff;
17233 test_desc.nic_mbuf = 0x00002100;
17234 test_desc.len = size;
17236 /*
17237 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17238 * the *second* time the tg3 driver was getting loaded after an
17241 * Broadcom tells me:
17242 * ...the DMA engine is connected to the GRC block and a DMA
17243 * reset may affect the GRC block in some unpredictable way...
17244 * The behavior of resets to individual blocks has not been tested.
17246 * Broadcom noted the GRC reset will also reset all sub-components.
17247 */
17249 test_desc.cqid_sqid = (13 << 8) | 2;
17251 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17254 test_desc.cqid_sqid = (16 << 8) | 7;
17256 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17259 test_desc.flags = 0x00000005;
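/* The loop below copies the descriptor into NIC SRAM one 32-bit word
 * at a time through the PCI memory window: point
 * TG3PCI_MEM_WIN_BASE_ADDR at the target SRAM offset, write the word
 * to TG3PCI_MEM_WIN_DATA, then park the window back at zero.
 */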
17261 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17264 val = *(((u32 *)&test_desc) + i);
17265 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17266 sram_dma_descs + (i * sizeof(u32)));
17267 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17269 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17272 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17274 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17277 for (i = 0; i < 40; i++) {
17281 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17283 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17284 if ((val & 0xffff) == sram_dma_descs) {
17295 #define TEST_BUFFER_SIZE 0x2000
17297 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17298 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17302 static int tg3_test_dma(struct tg3 *tp)
17304 dma_addr_t buf_dma;
17305 u32 *buf, saved_dma_rwctrl;
17308 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17309 &buf_dma, GFP_KERNEL);
17315 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17316 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17318 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17320 if (tg3_flag(tp, 57765_PLUS))
17323 if (tg3_flag(tp, PCI_EXPRESS)) {
17324 /* DMA read watermark not used on PCIE */
17325 tp->dma_rwctrl |= 0x00180000;
17326 } else if (!tg3_flag(tp, PCIX_MODE)) {
17327 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17328 tg3_asic_rev(tp) == ASIC_REV_5750)
17329 tp->dma_rwctrl |= 0x003f0000;
17331 tp->dma_rwctrl |= 0x003f000f;
17333 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17334 tg3_asic_rev(tp) == ASIC_REV_5704) {
17335 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17336 u32 read_water = 0x7;
17338 /* If the 5704 is behind the EPB bridge, we can
17339 * do the less restrictive ONE_DMA workaround for
17340 * better performance.
17341 */
17342 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17343 tg3_asic_rev(tp) == ASIC_REV_5704)
17344 tp->dma_rwctrl |= 0x8000;
17345 else if (ccval == 0x6 || ccval == 0x7)
17346 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17348 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17350 /* Set bit 23 to enable PCIX hw bug fix */
17352 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17353 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17355 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17356 /* 5780 always in PCIX mode */
17357 tp->dma_rwctrl |= 0x00144000;
17358 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17359 /* 5714 always in PCIX mode */
17360 tp->dma_rwctrl |= 0x00148000;
17362 tp->dma_rwctrl |= 0x001b000f;
17365 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17366 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17368 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17369 tg3_asic_rev(tp) == ASIC_REV_5704)
17370 tp->dma_rwctrl &= 0xfffffff0;
17372 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17373 tg3_asic_rev(tp) == ASIC_REV_5701) {
17374 /* Remove this if it causes problems for some boards. */
17375 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17377 /* On 5700/5701 chips, we need to set this bit.
17378 * Otherwise the chip will issue cacheline transactions
17379 * to streamable DMA memory without all the byte
17380 * enables turned on. This is an error on several
17381 * RISC PCI controllers, in particular sparc64.
17383 * On 5703/5704 chips, this bit has been reassigned
17384 * a different meaning. In particular, it is used
17385 * on those chips to enable a PCI-X workaround.
17386 */
17387 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17390 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17393 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17394 tg3_asic_rev(tp) != ASIC_REV_5701)
17397 /* It is best to perform DMA test with maximum write burst size
17398 * to expose the 5700/5701 write DMA bug.
17399 */
17400 saved_dma_rwctrl = tp->dma_rwctrl;
17401 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17402 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17407 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17410 /* Send the buffer to the chip. */
17411 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17413 dev_err(&tp->pdev->dev,
17414 "%s: Buffer write failed. err = %d\n",
17419 /* Now read it back. */
17420 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17422 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17423 "err = %d\n", __func__, ret);
17428 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17432 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17433 DMA_RWCTRL_WRITE_BNDRY_16) {
17434 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17435 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17436 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17439 dev_err(&tp->pdev->dev,
17440 "%s: Buffer corrupted on read back! "
17441 "(%d != %d)\n", __func__, p[i], i);
17447 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17453 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17454 DMA_RWCTRL_WRITE_BNDRY_16) {
17455 /* DMA test passed without adjusting DMA boundary,
17456 * now look for chipsets that are known to expose the
17457 * DMA bug without failing the test.
17458 */
17459 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17460 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17461 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17463 /* Safe to use the calculated DMA boundary. */
17464 tp->dma_rwctrl = saved_dma_rwctrl;
17467 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17471 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17476 static void tg3_init_bufmgr_config(struct tg3 *tp)
17478 if (tg3_flag(tp, 57765_PLUS)) {
17479 tp->bufmgr_config.mbuf_read_dma_low_water =
17480 DEFAULT_MB_RDMA_LOW_WATER_5705;
17481 tp->bufmgr_config.mbuf_mac_rx_low_water =
17482 DEFAULT_MB_MACRX_LOW_WATER_57765;
17483 tp->bufmgr_config.mbuf_high_water =
17484 DEFAULT_MB_HIGH_WATER_57765;
17486 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17487 DEFAULT_MB_RDMA_LOW_WATER_5705;
17488 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17489 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17490 tp->bufmgr_config.mbuf_high_water_jumbo =
17491 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17492 } else if (tg3_flag(tp, 5705_PLUS)) {
17493 tp->bufmgr_config.mbuf_read_dma_low_water =
17494 DEFAULT_MB_RDMA_LOW_WATER_5705;
17495 tp->bufmgr_config.mbuf_mac_rx_low_water =
17496 DEFAULT_MB_MACRX_LOW_WATER_5705;
17497 tp->bufmgr_config.mbuf_high_water =
17498 DEFAULT_MB_HIGH_WATER_5705;
17499 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17500 tp->bufmgr_config.mbuf_mac_rx_low_water =
17501 DEFAULT_MB_MACRX_LOW_WATER_5906;
17502 tp->bufmgr_config.mbuf_high_water =
17503 DEFAULT_MB_HIGH_WATER_5906;
17506 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17507 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17508 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17509 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17510 tp->bufmgr_config.mbuf_high_water_jumbo =
17511 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17513 tp->bufmgr_config.mbuf_read_dma_low_water =
17514 DEFAULT_MB_RDMA_LOW_WATER;
17515 tp->bufmgr_config.mbuf_mac_rx_low_water =
17516 DEFAULT_MB_MACRX_LOW_WATER;
17517 tp->bufmgr_config.mbuf_high_water =
17518 DEFAULT_MB_HIGH_WATER;
17520 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17521 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17522 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17523 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17524 tp->bufmgr_config.mbuf_high_water_jumbo =
17525 DEFAULT_MB_HIGH_WATER_JUMBO;
17528 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17529 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17532 static char *tg3_phy_string(struct tg3 *tp)
17534 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17535 case TG3_PHY_ID_BCM5400: return "5400";
17536 case TG3_PHY_ID_BCM5401: return "5401";
17537 case TG3_PHY_ID_BCM5411: return "5411";
17538 case TG3_PHY_ID_BCM5701: return "5701";
17539 case TG3_PHY_ID_BCM5703: return "5703";
17540 case TG3_PHY_ID_BCM5704: return "5704";
17541 case TG3_PHY_ID_BCM5705: return "5705";
17542 case TG3_PHY_ID_BCM5750: return "5750";
17543 case TG3_PHY_ID_BCM5752: return "5752";
17544 case TG3_PHY_ID_BCM5714: return "5714";
17545 case TG3_PHY_ID_BCM5780: return "5780";
17546 case TG3_PHY_ID_BCM5755: return "5755";
17547 case TG3_PHY_ID_BCM5787: return "5787";
17548 case TG3_PHY_ID_BCM5784: return "5784";
17549 case TG3_PHY_ID_BCM5756: return "5722/5756";
17550 case TG3_PHY_ID_BCM5906: return "5906";
17551 case TG3_PHY_ID_BCM5761: return "5761";
17552 case TG3_PHY_ID_BCM5718C: return "5718C";
17553 case TG3_PHY_ID_BCM5718S: return "5718S";
17554 case TG3_PHY_ID_BCM57765: return "57765";
17555 case TG3_PHY_ID_BCM5719C: return "5719C";
17556 case TG3_PHY_ID_BCM5720C: return "5720C";
17557 case TG3_PHY_ID_BCM5762: return "5762C";
17558 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17559 case 0: return "serdes";
17560 default: return "unknown";
17564 static char *tg3_bus_string(struct tg3 *tp, char *str)
17566 if (tg3_flag(tp, PCI_EXPRESS)) {
17567 strcpy(str, "PCI Express");
17569 } else if (tg3_flag(tp, PCIX_MODE)) {
17570 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17572 strcpy(str, "PCIX:");
17574 if ((clock_ctrl == 7) ||
17575 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17576 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17577 strcat(str, "133MHz");
17578 else if (clock_ctrl == 0)
17579 strcat(str, "33MHz");
17580 else if (clock_ctrl == 2)
17581 strcat(str, "50MHz");
17582 else if (clock_ctrl == 4)
17583 strcat(str, "66MHz");
17584 else if (clock_ctrl == 6)
17585 strcat(str, "100MHz");
17587 strcpy(str, "PCI:");
17588 if (tg3_flag(tp, PCI_HIGH_SPEED))
17589 strcat(str, "66MHz");
17591 strcat(str, "33MHz");
17593 if (tg3_flag(tp, PCI_32BIT))
17594 strcat(str, ":32-bit");
17596 strcat(str, ":64-bit");
17600 static void tg3_init_coal(struct tg3 *tp)
17602 struct ethtool_coalesce *ec = &tp->coal;
17604 memset(ec, 0, sizeof(*ec));
17605 ec->cmd = ETHTOOL_GCOALESCE;
17606 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17607 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17608 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17609 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17610 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17611 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17612 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17613 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17614 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17616 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17617 HOSTCC_MODE_CLRTICK_TXBD)) {
17618 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17619 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17620 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17621 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17624 if (tg3_flag(tp, 5705_PLUS)) {
17625 ec->rx_coalesce_usecs_irq = 0;
17626 ec->tx_coalesce_usecs_irq = 0;
17627 ec->stats_block_coalesce_usecs = 0;
17631 static int tg3_init_one(struct pci_dev *pdev,
17632 const struct pci_device_id *ent)
17634 struct net_device *dev;
17637 u32 sndmbx, rcvmbx, intmbx;
17639 u64 dma_mask, persist_dma_mask;
17640 netdev_features_t features = 0;
17642 printk_once(KERN_INFO "%s\n", version);
17644 err = pci_enable_device(pdev);
17646 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17650 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17652 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17653 goto err_out_disable_pdev;
17656 pci_set_master(pdev);
17658 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17661 goto err_out_free_res;
17664 SET_NETDEV_DEV(dev, &pdev->dev);
17666 tp = netdev_priv(dev);
17669 tp->rx_mode = TG3_DEF_RX_MODE;
17670 tp->tx_mode = TG3_DEF_TX_MODE;
17672 tp->pcierr_recovery = false;
17675 tp->msg_enable = tg3_debug;
17677 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17679 if (pdev_is_ssb_gige_core(pdev)) {
17680 tg3_flag_set(tp, IS_SSB_CORE);
17681 if (ssb_gige_must_flush_posted_writes(pdev))
17682 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17683 if (ssb_gige_one_dma_at_once(pdev))
17684 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17685 if (ssb_gige_have_roboswitch(pdev)) {
17686 tg3_flag_set(tp, USE_PHYLIB);
17687 tg3_flag_set(tp, ROBOSWITCH);
17689 if (ssb_gige_is_rgmii(pdev))
17690 tg3_flag_set(tp, RGMII_MODE);
17693 /* The word/byte swap controls here control register access byte
17694 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17695 * register.
17696 */
17697 tp->misc_host_ctrl =
17698 MISC_HOST_CTRL_MASK_PCI_INT |
17699 MISC_HOST_CTRL_WORD_SWAP |
17700 MISC_HOST_CTRL_INDIR_ACCESS |
17701 MISC_HOST_CTRL_PCISTATE_RW;
17703 /* The NONFRM (non-frame) byte/word swap controls take effect
17704 * on descriptor entries, anything which isn't packet data.
17706 * The StrongARM chips on the board (one for tx, one for rx)
17707 * are running in big-endian mode.
17708 */
17709 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17710 GRC_MODE_WSWAP_NONFRM_DATA);
17711 #ifdef __BIG_ENDIAN
17712 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17714 spin_lock_init(&tp->lock);
17715 spin_lock_init(&tp->indirect_lock);
17716 INIT_WORK(&tp->reset_task, tg3_reset_task);
17718 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17720 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17722 goto err_out_free_dev;
17725 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17726 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17727 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17728 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17729 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17730 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17731 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17732 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17733 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17734 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17735 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17736 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17737 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17738 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17739 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17740 tg3_flag_set(tp, ENABLE_APE);
17741 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17742 if (!tp->aperegs) {
17743 dev_err(&pdev->dev,
17744 "Cannot map APE registers, aborting\n");
17746 goto err_out_iounmap;
17750 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17751 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17753 dev->ethtool_ops = &tg3_ethtool_ops;
17754 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17755 dev->netdev_ops = &tg3_netdev_ops;
17756 dev->irq = pdev->irq;
17758 err = tg3_get_invariants(tp, ent);
17760 dev_err(&pdev->dev,
17761 "Problem fetching invariants of chip, aborting\n");
17762 goto err_out_apeunmap;
17765 /* The EPB bridge inside 5714, 5715, and 5780 and any
17766 * device behind the EPB cannot support DMA addresses > 40-bit.
17767 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17768 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17769 * do DMA address check in tg3_start_xmit().
17770 */
17771 if (tg3_flag(tp, IS_5788))
17772 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17773 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17774 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17775 #ifdef CONFIG_HIGHMEM
17776 dma_mask = DMA_BIT_MASK(64);
17779 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
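/* dma_mask governs streaming mappings while persist_dma_mask is used
 * for the coherent (persistent) allocations below; on HIGHMEM systems
 * with the 40-bit bug the two masks deliberately differ.
 */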
17781 /* Configure DMA attributes. */
17782 if (dma_mask > DMA_BIT_MASK(32)) {
17783 err = pci_set_dma_mask(pdev, dma_mask);
17785 features |= NETIF_F_HIGHDMA;
17786 err = pci_set_consistent_dma_mask(pdev,
17789 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17790 "DMA for consistent allocations\n");
17791 goto err_out_apeunmap;
17795 if (err || dma_mask == DMA_BIT_MASK(32)) {
17796 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17798 dev_err(&pdev->dev,
17799 "No usable DMA configuration, aborting\n");
17800 goto err_out_apeunmap;
17804 tg3_init_bufmgr_config(tp);
17806 /* 5700 B0 chips do not support checksumming correctly due
17807 * to hardware bugs.
17808 */
17809 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17810 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17812 if (tg3_flag(tp, 5755_PLUS))
17813 features |= NETIF_F_IPV6_CSUM;
17816 /* TSO is on by default on chips that support hardware TSO.
17817 * Firmware TSO on older chips gives lower performance, so it
17818 * is off by default, but can be enabled using ethtool.
17819 */
17820 if ((tg3_flag(tp, HW_TSO_1) ||
17821 tg3_flag(tp, HW_TSO_2) ||
17822 tg3_flag(tp, HW_TSO_3)) &&
17823 (features & NETIF_F_IP_CSUM))
17824 features |= NETIF_F_TSO;
17825 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17826 if (features & NETIF_F_IPV6_CSUM)
17827 features |= NETIF_F_TSO6;
17828 if (tg3_flag(tp, HW_TSO_3) ||
17829 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17830 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17831 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17832 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17833 tg3_asic_rev(tp) == ASIC_REV_57780)
17834 features |= NETIF_F_TSO_ECN;
17837 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17838 NETIF_F_HW_VLAN_CTAG_RX;
17839 dev->vlan_features |= features;
17841 /*
17842 * Add loopback capability only for a subset of devices that support
17843 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17844 * loopback for the remaining devices.
17845 */
17846 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17847 !tg3_flag(tp, CPMU_PRESENT))
17848 /* Add the loopback capability */
17849 features |= NETIF_F_LOOPBACK;
17851 dev->hw_features |= features;
17852 dev->priv_flags |= IFF_UNICAST_FLT;
17854 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17855 dev->min_mtu = TG3_MIN_MTU;
17856 dev->max_mtu = TG3_MAX_MTU(tp);
17858 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17859 !tg3_flag(tp, TSO_CAPABLE) &&
17860 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17861 tg3_flag_set(tp, MAX_RXPEND_64);
17862 tp->rx_pending = 63;
17865 err = tg3_get_device_address(tp);
17867 dev_err(&pdev->dev,
17868 "Could not obtain valid ethernet address, aborting\n");
17869 goto err_out_apeunmap;
17872 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17873 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17874 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17875 for (i = 0; i < tp->irq_max; i++) {
17876 struct tg3_napi *tnapi = &tp->napi[i];
17879 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17881 tnapi->int_mbox = intmbx;
17887 tnapi->consmbox = rcvmbx;
17888 tnapi->prodmbox = sndmbx;
17891 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17893 tnapi->coal_now = HOSTCC_MODE_NOW;
17895 if (!tg3_flag(tp, SUPPORT_MSIX))
17898 /*
17899 * If we support MSIX, we'll be using RSS. If we're using
17900 * RSS, the first vector only handles link interrupts and the
17901 * remaining vectors handle rx and tx interrupts. Reuse the
17902 * mailbox values for the next iteration. The values we set up
17903 * above are still useful for the single vectored mode.
17904 */
17916 /*
17917 * Reset chip in case UNDI or EFI driver did not shut down
17918 * DMA. The self test will enable WDMAC and we'll see (spurious)
17919 * pending DMA on the PCI bus at that point.
17920 */
17921 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17922 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17923 tg3_full_lock(tp, 0);
17924 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17925 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17926 tg3_full_unlock(tp);
17929 err = tg3_test_dma(tp);
17931 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17932 goto err_out_apeunmap;
17937 pci_set_drvdata(pdev, dev);
17939 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17940 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17941 tg3_asic_rev(tp) == ASIC_REV_5762)
17942 tg3_flag_set(tp, PTP_CAPABLE);
17944 tg3_timer_init(tp);
17946 tg3_carrier_off(tp);
17948 err = register_netdev(dev);
17950 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17951 goto err_out_apeunmap;
17954 if (tg3_flag(tp, PTP_CAPABLE)) {
17956 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17958 if (IS_ERR(tp->ptp_clock))
17959 tp->ptp_clock = NULL;
17962 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17963 tp->board_part_number,
17964 tg3_chip_rev_id(tp),
17965 tg3_bus_string(tp, str),
17968 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17971 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17972 ethtype = "10/100Base-TX";
17973 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17974 ethtype = "1000Base-SX";
17976 ethtype = "10/100/1000Base-T";
17978 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17979 "(WireSpeed[%d], EEE[%d])\n",
17980 tg3_phy_string(tp), ethtype,
17981 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17982 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17985 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17986 (dev->features & NETIF_F_RXCSUM) != 0,
17987 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17988 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17989 tg3_flag(tp, ENABLE_ASF) != 0,
17990 tg3_flag(tp, TSO_CAPABLE) != 0);
17991 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17993 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17994 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17996 pci_save_state(pdev);
18002 iounmap(tp->aperegs);
18003 tp->aperegs = NULL;
18016 pci_release_regions(pdev);
18018 err_out_disable_pdev:
18019 if (pci_is_enabled(pdev))
18020 pci_disable_device(pdev);
18024 static void tg3_remove_one(struct pci_dev *pdev)
18026 struct net_device *dev = pci_get_drvdata(pdev);
18029 struct tg3 *tp = netdev_priv(dev);
18033 release_firmware(tp->fw);
18035 tg3_reset_task_cancel(tp);
18037 if (tg3_flag(tp, USE_PHYLIB)) {
18042 unregister_netdev(dev);
18044 iounmap(tp->aperegs);
18045 tp->aperegs = NULL;
18052 pci_release_regions(pdev);
18053 pci_disable_device(pdev);
18057 #ifdef CONFIG_PM_SLEEP
18058 static int tg3_suspend(struct device *device)
18060 struct pci_dev *pdev = to_pci_dev(device);
18061 struct net_device *dev = pci_get_drvdata(pdev);
18062 struct tg3 *tp = netdev_priv(dev);
18067 if (!netif_running(dev))
18070 tg3_reset_task_cancel(tp);
18072 tg3_netif_stop(tp);
18074 tg3_timer_stop(tp);
18076 tg3_full_lock(tp, 1);
18077 tg3_disable_ints(tp);
18078 tg3_full_unlock(tp);
18080 netif_device_detach(dev);
18082 tg3_full_lock(tp, 0);
18083 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18084 tg3_flag_clear(tp, INIT_COMPLETE);
18085 tg3_full_unlock(tp);
18087 err = tg3_power_down_prepare(tp);
18091 tg3_full_lock(tp, 0);
18093 tg3_flag_set(tp, INIT_COMPLETE);
18094 err2 = tg3_restart_hw(tp, true);
18098 tg3_timer_start(tp);
18100 netif_device_attach(dev);
18101 tg3_netif_start(tp);
18104 tg3_full_unlock(tp);
18115 static int tg3_resume(struct device *device)
18117 struct pci_dev *pdev = to_pci_dev(device);
18118 struct net_device *dev = pci_get_drvdata(pdev);
18119 struct tg3 *tp = netdev_priv(dev);
18124 if (!netif_running(dev))
18127 netif_device_attach(dev);
18129 tg3_full_lock(tp, 0);
18131 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18133 tg3_flag_set(tp, INIT_COMPLETE);
18134 err = tg3_restart_hw(tp,
18135 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18139 tg3_timer_start(tp);
18141 tg3_netif_start(tp);
18144 tg3_full_unlock(tp);
18153 #endif /* CONFIG_PM_SLEEP */
18155 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18157 static void tg3_shutdown(struct pci_dev *pdev)
18159 struct net_device *dev = pci_get_drvdata(pdev);
18160 struct tg3 *tp = netdev_priv(dev);
18163 netif_device_detach(dev);
18165 if (netif_running(dev))
18168 if (system_state == SYSTEM_POWER_OFF)
18169 tg3_power_down(tp);
18174 /**
18175 * tg3_io_error_detected - called when PCI error is detected
18176 * @pdev: Pointer to PCI device
18177 * @state: The current pci connection state
18179 * This function is called after a PCI bus error affecting
18180 * this device has been detected.
18181 */
18182 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18183 pci_channel_state_t state)
18185 struct net_device *netdev = pci_get_drvdata(pdev);
18186 struct tg3 *tp = netdev_priv(netdev);
18187 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18189 netdev_info(netdev, "PCI I/O error detected\n");
18193 /* We probably don't have netdev yet */
18194 if (!netdev || !netif_running(netdev))
18197 /* We needn't recover from permanent error */
18198 if (state == pci_channel_io_frozen)
18199 tp->pcierr_recovery = true;
18203 tg3_netif_stop(tp);
18205 tg3_timer_stop(tp);
18207 /* Want to make sure that the reset task doesn't run */
18208 tg3_reset_task_cancel(tp);
18210 netif_device_detach(netdev);
18212 /* Clean up software state, even if MMIO is blocked */
18213 tg3_full_lock(tp, 0);
18214 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18215 tg3_full_unlock(tp);
18218 if (state == pci_channel_io_perm_failure) {
18220 tg3_napi_enable(tp);
18223 err = PCI_ERS_RESULT_DISCONNECT;
18225 pci_disable_device(pdev);
18233 /**
18234 * tg3_io_slot_reset - called after the pci bus has been reset.
18235 * @pdev: Pointer to PCI device
18237 * Restart the card from scratch, as if from a cold-boot.
18238 * At this point, the card has experienced a hard reset,
18239 * followed by fixups by BIOS, and has its config space
18240 * set up identically to what it was at cold boot.
18241 */
18242 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18244 struct net_device *netdev = pci_get_drvdata(pdev);
18245 struct tg3 *tp = netdev_priv(netdev);
18246 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18251 if (pci_enable_device(pdev)) {
18252 dev_err(&pdev->dev,
18253 "Cannot re-enable PCI device after reset.\n");
18257 pci_set_master(pdev);
18258 pci_restore_state(pdev);
18259 pci_save_state(pdev);
18261 if (!netdev || !netif_running(netdev)) {
18262 rc = PCI_ERS_RESULT_RECOVERED;
18266 err = tg3_power_up(tp);
18270 rc = PCI_ERS_RESULT_RECOVERED;
18273 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18274 tg3_napi_enable(tp);
18282 /**
18283 * tg3_io_resume - called when traffic can start flowing again.
18284 * @pdev: Pointer to PCI device
18286 * This callback is called when the error recovery driver tells
18287 * us that it's OK to resume normal operation.
18288 */
18289 static void tg3_io_resume(struct pci_dev *pdev)
18291 struct net_device *netdev = pci_get_drvdata(pdev);
18292 struct tg3 *tp = netdev_priv(netdev);
18297 if (!netdev || !netif_running(netdev))
18300 tg3_full_lock(tp, 0);
18301 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18302 tg3_flag_set(tp, INIT_COMPLETE);
18303 err = tg3_restart_hw(tp, true);
18305 tg3_full_unlock(tp);
18306 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18310 netif_device_attach(netdev);
18312 tg3_timer_start(tp);
18314 tg3_netif_start(tp);
18316 tg3_full_unlock(tp);
18321 tp->pcierr_recovery = false;
18325 static const struct pci_error_handlers tg3_err_handler = {
18326 .error_detected = tg3_io_error_detected,
18327 .slot_reset = tg3_io_slot_reset,
18328 .resume = tg3_io_resume
18331 static struct pci_driver tg3_driver = {
18332 .name = DRV_MODULE_NAME,
18333 .id_table = tg3_pci_tbl,
18334 .probe = tg3_init_one,
18335 .remove = tg3_remove_one,
18336 .err_handler = &tg3_err_handler,
18337 .driver.pm = &tg3_pm_ops,
18338 .shutdown = tg3_shutdown,
18341 module_pci_driver(tg3_driver);