/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey. It's neither supported nor endorsed
 * by NVIDIA Corp. Use at your own risk.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 *	0.01: 05 Oct 2003: First release that compiles without warnings.
 *	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			   Check all PCI BARs for the register window.
 *			   udelay added to mii_rw.
 *	0.03: 06 Oct 2003: Initialize dev->irq.
 *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *			   irq mask updated
 *	0.07: 14 Oct 2003: Further irq mask updates.
 *	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			   added into irq handler, NULL check for drain_ring.
 *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			   requested interrupt sources.
 *	0.10: 20 Oct 2003: First cleanup for release.
 *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			   MAC Address init fix, set_multicast cleanup.
 *	0.12: 23 Oct 2003: Cleanups for release.
 *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			   Set link speed correctly. start rx before starting
 *			   tx (nv_start_rx sets the link speed).
 *	0.14: 25 Oct 2003: Nic dependent irq mask.
 *	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *			   open.
 *	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			   increased to 1628 bytes.
 *	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *			   the tx length.
 *	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			   addresses, really stop rx if already running
 *			   in nv_start_rx, clean up a bit.
 *	0.20: 07 Dec 2003: alloc fixes
 *	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *			   on close.
 *	0.23: 26 Jan 2004: various small cleanups
 *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *	0.25: 09 Mar 2004: wol support
 *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			   added CK804/MCP04 device IDs, code fixes
 *			   for registers, link status and other minor fixes.
 *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *	0.29: 31 Aug 2004: Add backup timer for link change notification.
 *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			   into nv_close, otherwise reenabling for wol can
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *			   capabilities.
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *	0.33: 16 May 2005: Support for MCP51 added.
 *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *	0.35: 26 Jun 2005: Support for MCP55 added.
 *	0.36: 28 Jun 2005: Add jumbo frame support.
 *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *			   per-packet flags.
 *	0.39: 18 Jul 2005: Add 64bit descriptor support.
 *	0.40: 19 Jul 2005: Add support for mac address change.
 *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *			   of nv_remove
 *	0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			   in the second (and later) nv_open call
 *	0.43: 10 Aug 2005: Add support for tx checksum.
 *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 *	0.46: 20 Oct 2005: Add irq optimization modes.
 *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 *	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 *	0.49: 10 Dec 2005: Fix tso for large buffers.
 *	0.50: 20 Jan 2006: Add 8021pq tagging support.
 *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 *	0.52: 20 Jan 2006: Add MSI/MSIX support.
 *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 *	0.55: 22 Mar 2006: Add flow control (pause frame).
 *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI
#endif
#define FORCEDETH_VERSION		"0.57"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif


/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and stripping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS	0x0400	/* device supports hw statistics */
#define DEV_HAS_TEST_EXTENDED	0x0800	/* device supports extended diagnostic test */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x1ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0040
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x02100
#define NVREG_TXRXCTL_DESC_3	0x02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegUnknownSetupReg4 = 0x184,
#define NVREG_UNKSETUP4_VAL	8

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384
/*
 * Difference between the get and put pointers for the tx ring.
 * This is used to throttle the amount of data outstanding in the
 * tx ring.
 */
#define TX_LIMIT_DIFFERENCE	1

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3

/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHY_INIT1	0x0f000
#define PHY_INIT2	0x0e00
#define PHY_INIT3	0x01000
#define PHY_INIT4	0x0200
#define PHY_INIT5	0x0004
#define PHY_INIT6	0x02000
#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE	      0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED	      0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "tx_pause" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_bytes" },
	{ "rx_pause" },
	{ "rx_drop_frame" },
	{ "rx_packets" },
	{ "rx_errors_total" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 tx_deferral;
	u64 tx_packets;
	u64 tx_pause;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_bytes;
	u64 rx_pause;
	u64 rx_drop_frame;
	u64 rx_packets;
	u64 rx_errors_total;
};

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__le32 reg;
	__le32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	u16 gigabit;
	int intr_test;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type rx_ring;
	unsigned int cur_rx, refill_rx;
	struct sk_buff **rx_skbuff;
	dma_addr_t *rx_dma;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type tx_ring;
	unsigned int next_tx, nic_tx;
	struct sk_buff **tx_skbuff;
	dma_addr_t *tx_dma;
	unsigned int *tx_dma_len;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit_start;
	int tx_limit_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is computed as [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

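/*
 * reg_delay: poll a register until (value & mask) == target or until
 * delaymax usec have elapsed. Returns 0 on success, 1 on timeout.
 */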
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

867 | { | |
868 | struct fe_priv *np = get_nvpriv(dev); | |
869 | u8 __iomem *base = get_hwbase(dev); | |
870 | ||
871 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
872 | if (rxtx_flags & NV_SETUP_RX_RING) { | |
873 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | |
874 | } | |
875 | if (rxtx_flags & NV_SETUP_TX_RING) { | |
eafa59f6 | 876 | writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); |
0832b25a AA |
877 | } |
878 | } else { | |
879 | if (rxtx_flags & NV_SETUP_RX_RING) { | |
880 | writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr); | |
881 | writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh); | |
882 | } | |
883 | if (rxtx_flags & NV_SETUP_TX_RING) { | |
eafa59f6 AA |
884 | writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); |
885 | writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh); | |
0832b25a AA |
886 | } |
887 | } | |
888 | } | |
889 | ||
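/*
 * free_rings: release the descriptor memory and the rx/tx bookkeeping
 * arrays. Safe to call with partially allocated rings.
 */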
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skbuff)
		kfree(np->rx_skbuff);
	if (np->rx_dma)
		kfree(np->rx_dma);
	if (np->tx_skbuff)
		kfree(np->tx_skbuff);
	if (np->tx_dma)
		kfree(np->tx_dma);
	if (np->tx_dma_len)
		kfree(np->tx_dma_len);
}

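/*
 * using_multi_irqs: true only when MSI-X is enabled with more than one
 * vector; legacy irqs, MSI and single-vector MSI-X share one handler.
 */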
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

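/*
 * nv_enable_irq/nv_disable_irq: unmask or mask the interrupt sources at
 * the cpu level, per vector when multiple MSI-X vectors are in use.
 */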
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}

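/*
 * phy_reset: soft-reset the PHY via BMCR and wait for the reset bit to
 * clear again. Returns 0 on success, -1 on write failure or timeout.
 */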
static int phy_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	miicontrol |= BMCR_RESET;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

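/*
 * phy_init: set up the autoneg advertisement (including pause and, on
 * gigabit phys, 1000FD), apply Cicada-specific fixups and restart
 * autonegotiation. Returns 0 on success, PHY_ERROR otherwise.
 */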
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	/* reset the phy */
	if (phy_reset(dev)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
		phy_reserved |= (PHY_INIT3 | PHY_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
		writel(0, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	writel(0, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	writel(0, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
}

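/*
 * nv_txrx_reset: pulse the reset bit in NvRegTxRxControl while keeping
 * the configured descriptor version bits intact.
 */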
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

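/*
 * nv_mac_reset: full MAC reset via NvRegMacReset, a stronger reset than
 * nv_txrx_reset.
 */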
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* It seems that the nic always generates interrupts and doesn't
	 * accumulate errors internally. Thus the current values in np->stats
	 * are already up to date.
	 */
	return &np->stats;
}

/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int refill_rx = np->refill_rx;
	int nr;

	while (np->cur_rx != refill_rx) {
		struct sk_buff *skb;

		nr = refill_rx % np->rx_ring_size;
		if (np->rx_skbuff[nr] == NULL) {

			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
			if (!skb)
				break;

			skb->dev = dev;
			np->rx_skbuff[nr] = skb;
		} else {
			skb = np->rx_skbuff[nr];
		}
		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
					skb->end-skb->data, PCI_DMA_FROMDEVICE);
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
			wmb();
			np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
		} else {
			np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
			np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
			wmb();
			np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
		}
		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
					dev->name, refill_rx);
		refill_rx++;
	}
	np->refill_rx = refill_rx;
	if (np->cur_rx - refill_rx == np->rx_ring_size)
		return 1;
	return 0;
}

/* If rx bufs are exhausted, called after 50ms to attempt a refill */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (nv_alloc_rx(dev)) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif

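/*
 * nv_init_rx/nv_init_tx: reset the ring bookkeeping and clear all
 * descriptor flags; nv_init_ring then refills rx via nv_alloc_rx.
 */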
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->rx_ring_size;
	np->refill_rx = 0;
	for (i = 0; i < np->rx_ring_size; i++)
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->rx_ring.orig[i].flaglen = 0;
		else
			np->rx_ring.ex[i].flaglen = 0;
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->next_tx = np->nic_tx = 0;
	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
			np->tx_ring.orig[i].flaglen = 0;
		else
			np->tx_ring.ex[i].flaglen = 0;
		np->tx_skbuff[i] = NULL;
		np->tx_dma[i] = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	nv_init_tx(dev);
	nv_init_rx(dev);
	return nv_alloc_rx(dev);
}

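/*
 * nv_release_txskb: unmap the dma buffer and free the skb attached to a
 * tx ring slot. Returns 1 if an skb was actually freed.
 */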
static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
{
	struct fe_priv *np = netdev_priv(dev);

	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
		dev->name, skbnr);

	if (np->tx_dma[skbnr]) {
		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
			       np->tx_dma_len[skbnr],
			       PCI_DMA_TODEVICE);
		np->tx_dma[skbnr] = 0;
	}

	if (np->tx_skbuff[skbnr]) {
		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
		np->tx_skbuff[skbnr] = NULL;
		return 1;
	} else {
		return 0;
	}
}

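/*
 * nv_drain_tx/nv_drain_rx: clear all descriptors and release any skbs
 * still held in the rings; the caller must make sure the nic is no
 * longer using the rings.
 */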
1da177e4 LT |
1387 | static void nv_drain_tx(struct net_device *dev) |
1388 | { | |
ac9c1897 AA |
1389 | struct fe_priv *np = netdev_priv(dev); |
1390 | unsigned int i; | |
f3b197ac | 1391 | |
eafa59f6 | 1392 | for (i = 0; i < np->tx_ring_size; i++) { |
ee73362c | 1393 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
f82a9352 | 1394 | np->tx_ring.orig[i].flaglen = 0; |
ee73362c | 1395 | else |
f82a9352 | 1396 | np->tx_ring.ex[i].flaglen = 0; |
fa45459e | 1397 | if (nv_release_txskb(dev, i)) |
1da177e4 | 1398 | np->stats.tx_dropped++; |
1da177e4 LT |
1399 | } |
1400 | } | |
1401 | ||
1402 | static void nv_drain_rx(struct net_device *dev) | |
1403 | { | |
ac9c1897 | 1404 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1405 | int i; |
eafa59f6 | 1406 | for (i = 0; i < np->rx_ring_size; i++) { |
ee73362c | 1407 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
f82a9352 | 1408 | np->rx_ring.orig[i].flaglen = 0; |
ee73362c | 1409 | else |
f82a9352 | 1410 | np->rx_ring.ex[i].flaglen = 0; |
1da177e4 LT |
1411 | wmb(); |
1412 | if (np->rx_skbuff[i]) { | |
1413 | pci_unmap_single(np->pci_dev, np->rx_dma[i], | |
1836098f | 1414 | np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, |
1da177e4 LT |
1415 | PCI_DMA_FROMDEVICE); |
1416 | dev_kfree_skb(np->rx_skbuff[i]); | |
1417 | np->rx_skbuff[i] = NULL; | |
1418 | } | |
1419 | } | |
1420 | } | |
1421 | ||
1422 | static void drain_ring(struct net_device *dev) | |
1423 | { | |
1424 | nv_drain_tx(dev); | |
1425 | nv_drain_rx(dev); | |
1426 | } | |
1427 | ||
1428 | /* | |
1429 | * nv_start_xmit: dev->hard_start_xmit function | |
932ff279 | 1430 | * Called with netif_tx_lock held. |
1da177e4 LT |
1431 | */ |
1432 | static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1433 | { | |
ac9c1897 | 1434 | struct fe_priv *np = netdev_priv(dev); |
fa45459e | 1435 | u32 tx_flags = 0; |
ac9c1897 AA |
1436 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); |
1437 | unsigned int fragments = skb_shinfo(skb)->nr_frags; | |
eafa59f6 AA |
1438 | unsigned int nr = (np->next_tx - 1) % np->tx_ring_size; |
1439 | unsigned int start_nr = np->next_tx % np->tx_ring_size; | |
ac9c1897 | 1440 | unsigned int i; |
fa45459e AA |
1441 | u32 offset = 0; |
1442 | u32 bcnt; | |
1443 | u32 size = skb->len-skb->data_len; | |
1444 | u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | |
ee407b02 | 1445 | u32 tx_flags_vlan = 0; |
fa45459e AA |
1446 | |
1447 | /* add fragments to entries count */ | |
1448 | for (i = 0; i < fragments; i++) { | |
1449 | entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + | |
1450 | ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | |
1451 | } | |
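/* [Editor's note, added for clarity - not in the driver source]
 * The size-to-entries expressions above are ceiling divisions: each
 * descriptor carries at most NV_TX2_TSO_MAX_SIZE bytes, so a buffer of
 * size s needs ceil(s / NV_TX2_TSO_MAX_SIZE) descriptors. Illustrative
 * numbers: if NV_TX2_TSO_MAX_SIZE were 4096 (shift 12), a 9000-byte
 * buffer would need (9000 >> 12) + 1 = 3 entries.
 */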
ac9c1897 AA |
1452 | |
1453 | spin_lock_irq(&np->lock); | |
1454 | ||
eafa59f6 | 1455 | if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) { |
ac9c1897 AA |
1456 | spin_unlock_irq(&np->lock); |
1457 | netif_stop_queue(dev); | |
1458 | return NETDEV_TX_BUSY; | |
1459 | } | |
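/* [Editor's note, added for clarity - not in the driver source]
 * np->next_tx and np->nic_tx are free-running counters, so their
 * difference is the number of descriptors still in flight; the check
 * above stops the queue once that count plus the new entries (less one)
 * would exceed tx_limit_stop.
 */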
1da177e4 | 1460 | |
fa45459e AA |
1461 | /* setup the header buffer */ |
1462 | do { | |
1463 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | |
eafa59f6 | 1464 | nr = (nr + 1) % np->tx_ring_size; |
fa45459e AA |
1465 | |
1466 | np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt, | |
1467 | PCI_DMA_TODEVICE); | |
1468 | np->tx_dma_len[nr] = bcnt; | |
1469 | ||
1470 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 SH |
1471 | np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); |
1472 | np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); | |
fa45459e | 1473 | } else { |
f82a9352 SH |
1474 | np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; |
1475 | np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; | |
1476 | np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); | |
fa45459e AA |
1477 | } |
1478 | tx_flags = np->tx_flags; | |
1479 | offset += bcnt; | |
1480 | size -= bcnt; | |
f82a9352 | 1481 | } while (size); |
fa45459e AA |
1482 | |
1483 | /* setup the fragments */ | |
1484 | for (i = 0; i < fragments; i++) { | |
1485 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1486 | u32 size = frag->size; | |
1487 | offset = 0; | |
1488 | ||
1489 | do { | |
1490 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | |
eafa59f6 | 1491 | nr = (nr + 1) % np->tx_ring_size; |
fa45459e AA |
1492 | |
1493 | np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt, | |
1494 | PCI_DMA_TODEVICE); | |
1495 | np->tx_dma_len[nr] = bcnt; | |
1da177e4 | 1496 | |
ac9c1897 | 1497 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
f82a9352 SH |
1498 | np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]); |
1499 | np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); | |
ac9c1897 | 1500 | } else { |
f82a9352 SH |
1501 | np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32; |
1502 | np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; | |
1503 | np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags); | |
ac9c1897 | 1504 | } |
fa45459e AA |
1505 | offset += bcnt; |
1506 | size -= bcnt; | |
1507 | } while (size); | |
1508 | } | |
ac9c1897 | 1509 | |
fa45459e AA |
1510 | /* set last fragment flag */ |
1511 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 | 1512 | np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra); |
fa45459e | 1513 | } else { |
f82a9352 | 1514 | np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra); |
ac9c1897 AA |
1515 | } |
1516 | ||
fa45459e AA |
1517 | np->tx_skbuff[nr] = skb; |
1518 | ||
ac9c1897 | 1519 | #ifdef NETIF_F_TSO |
89114afd | 1520 | if (skb_is_gso(skb)) |
7967168c | 1521 | tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); |
ac9c1897 AA |
1522 | else |
1523 | #endif | |
fa45459e | 1524 | tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); |
ac9c1897 | 1525 | |
ee407b02 AA |
1526 | /* vlan tag */ |
1527 | if (np->vlangrp && vlan_tx_tag_present(skb)) { | |
1528 | tx_flags_vlan = NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb); | |
1529 | } | |
1530 | ||
fa45459e | 1531 | /* set tx flags */ |
ac9c1897 | 1532 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
f82a9352 | 1533 | np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); |
ac9c1897 | 1534 | } else { |
f82a9352 SH |
1535 | np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan); |
1536 | np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra); | |
f3b197ac | 1537 | } |
1da177e4 | 1538 | |
fa45459e AA |
1539 | dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", |
1540 | dev->name, np->next_tx, entries, tx_flags_extra); | |
1da177e4 LT |
1541 | { |
1542 | int j; | |
1543 | for (j=0; j<64; j++) { | |
1544 | if ((j%16) == 0) | |
1545 | dprintk("\n%03x:", j); | |
1546 | dprintk(" %02x", ((unsigned char*)skb->data)[j]); | |
1547 | } | |
1548 | dprintk("\n"); | |
1549 | } | |
1550 | ||
fa45459e | 1551 | np->next_tx += entries; |
1da177e4 LT |
1552 | |
1553 | dev->trans_start = jiffies; | |
1da177e4 | 1554 | spin_unlock_irq(&np->lock); |
8a4ae7f2 | 1555 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
1da177e4 | 1556 | pci_push(get_hwbase(dev)); |
ac9c1897 | 1557 | return NETDEV_TX_OK; |
1da177e4 LT |
1558 | } |
1559 | ||
1560 | /* | |
1561 | * nv_tx_done: check for completed packets, release the skbs. | |
1562 | * | |
1563 | * Caller must own np->lock. | |
1564 | */ | |
1565 | static void nv_tx_done(struct net_device *dev) | |
1566 | { | |
ac9c1897 | 1567 | struct fe_priv *np = netdev_priv(dev); |
f82a9352 | 1568 | u32 flags; |
ac9c1897 AA |
1569 | unsigned int i; |
1570 | struct sk_buff *skb; | |
1da177e4 LT |
1571 | |
1572 | while (np->nic_tx != np->next_tx) { | |
eafa59f6 | 1573 | i = np->nic_tx % np->tx_ring_size; |
1da177e4 | 1574 | |
ee73362c | 1575 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) |
f82a9352 | 1576 | flags = le32_to_cpu(np->tx_ring.orig[i].flaglen); |
ee73362c | 1577 | else |
f82a9352 | 1578 | flags = le32_to_cpu(np->tx_ring.ex[i].flaglen); |
1da177e4 | 1579 | |
f82a9352 SH |
1580 | dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n", |
1581 | dev->name, np->nic_tx, flags); | |
1582 | if (flags & NV_TX_VALID) | |
1da177e4 LT |
1583 | break; |
1584 | if (np->desc_ver == DESC_VER_1) { | |
f82a9352 | 1585 | if (flags & NV_TX_LASTPACKET) { |
ac9c1897 | 1586 | skb = np->tx_skbuff[i]; |
f82a9352 | 1587 | if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| |
ac9c1897 | 1588 | NV_TX_UNDERFLOW|NV_TX_ERROR)) { |
f82a9352 | 1589 | if (flags & NV_TX_UNDERFLOW) |
ac9c1897 | 1590 | np->stats.tx_fifo_errors++; |
f82a9352 | 1591 | if (flags & NV_TX_CARRIERLOST) |
ac9c1897 AA |
1592 | np->stats.tx_carrier_errors++; |
1593 | np->stats.tx_errors++; | |
1594 | } else { | |
1595 | np->stats.tx_packets++; | |
1596 | np->stats.tx_bytes += skb->len; | |
1597 | } | |
1da177e4 LT |
1598 | } |
1599 | } else { | |
f82a9352 | 1600 | if (flags & NV_TX2_LASTPACKET) { |
ac9c1897 | 1601 | skb = np->tx_skbuff[i]; |
f82a9352 | 1602 | if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| |
ac9c1897 | 1603 | NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { |
f82a9352 | 1604 | if (flags & NV_TX2_UNDERFLOW) |
ac9c1897 | 1605 | np->stats.tx_fifo_errors++; |
f82a9352 | 1606 | if (flags & NV_TX2_CARRIERLOST) |
ac9c1897 AA |
1607 | np->stats.tx_carrier_errors++; |
1608 | np->stats.tx_errors++; | |
1609 | } else { | |
1610 | np->stats.tx_packets++; | |
1611 | np->stats.tx_bytes += skb->len; | |
f3b197ac | 1612 | } |
1da177e4 LT |
1613 | } |
1614 | } | |
fa45459e | 1615 | nv_release_txskb(dev, i); |
1da177e4 LT |
1616 | np->nic_tx++; |
1617 | } | |
eafa59f6 | 1618 | if (np->next_tx - np->nic_tx < np->tx_limit_start) |
1da177e4 LT |
1619 | netif_wake_queue(dev); |
1620 | } | |
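/* [Editor's note, added for clarity - not in the driver source]
 * Waking the queue at tx_limit_start while nv_start_xmit() stops it at
 * tx_limit_stop gives the queue hysteresis, so it is not toggled on
 * every completed packet.
 */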
1621 | ||
1622 | /* | |
1623 | * nv_tx_timeout: dev->tx_timeout function | |
932ff279 | 1624 | * Called with netif_tx_lock held. |
1da177e4 LT |
1625 | */ |
1626 | static void nv_tx_timeout(struct net_device *dev) | |
1627 | { | |
ac9c1897 | 1628 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 1629 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 AA |
1630 | u32 status; |
1631 | ||
1632 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
1633 | status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
1634 | else | |
1635 | status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
1da177e4 | 1636 | |
d33a73c8 | 1637 | printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); |
1da177e4 | 1638 | |
c2dba06d MS |
1639 | { |
1640 | int i; | |
1641 | ||
1642 | printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n", | |
1643 | dev->name, (unsigned long)np->ring_addr, | |
1644 | np->next_tx, np->nic_tx); | |
1645 | printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); | |
86a0f043 | 1646 | for (i=0;i<=np->register_size;i+= 32) { |
c2dba06d MS |
1647 | printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", |
1648 | i, | |
1649 | readl(base + i + 0), readl(base + i + 4), | |
1650 | readl(base + i + 8), readl(base + i + 12), | |
1651 | readl(base + i + 16), readl(base + i + 20), | |
1652 | readl(base + i + 24), readl(base + i + 28)); | |
1653 | } | |
1654 | printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); | |
eafa59f6 | 1655 | for (i=0;i<np->tx_ring_size;i+= 4) { |
ee73362c MS |
1656 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
1657 | printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", | |
f3b197ac | 1658 | i, |
f82a9352 SH |
1659 | le32_to_cpu(np->tx_ring.orig[i].buf), |
1660 | le32_to_cpu(np->tx_ring.orig[i].flaglen), | |
1661 | le32_to_cpu(np->tx_ring.orig[i+1].buf), | |
1662 | le32_to_cpu(np->tx_ring.orig[i+1].flaglen), | |
1663 | le32_to_cpu(np->tx_ring.orig[i+2].buf), | |
1664 | le32_to_cpu(np->tx_ring.orig[i+2].flaglen), | |
1665 | le32_to_cpu(np->tx_ring.orig[i+3].buf), | |
1666 | le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); | |
ee73362c MS |
1667 | } else { |
1668 | printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", | |
f3b197ac | 1669 | i, |
f82a9352 SH |
1670 | le32_to_cpu(np->tx_ring.ex[i].bufhigh), |
1671 | le32_to_cpu(np->tx_ring.ex[i].buflow), | |
1672 | le32_to_cpu(np->tx_ring.ex[i].flaglen), | |
1673 | le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), | |
1674 | le32_to_cpu(np->tx_ring.ex[i+1].buflow), | |
1675 | le32_to_cpu(np->tx_ring.ex[i+1].flaglen), | |
1676 | le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), | |
1677 | le32_to_cpu(np->tx_ring.ex[i+2].buflow), | |
1678 | le32_to_cpu(np->tx_ring.ex[i+2].flaglen), | |
1679 | le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), | |
1680 | le32_to_cpu(np->tx_ring.ex[i+3].buflow), | |
1681 | le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); | |
ee73362c | 1682 | } |
c2dba06d MS |
1683 | } |
1684 | } | |
1685 | ||
1da177e4 LT |
1686 | spin_lock_irq(&np->lock); |
1687 | ||
1688 | /* 1) stop tx engine */ | |
1689 | nv_stop_tx(dev); | |
1690 | ||
1691 | /* 2) check that the packets were not sent already: */ | |
1692 | nv_tx_done(dev); | |
1693 | ||
1694 | /* 3) if there are dead entries: clear everything */ | |
1695 | if (np->next_tx != np->nic_tx) { | |
1696 | printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name); | |
1697 | nv_drain_tx(dev); | |
1698 | np->next_tx = np->nic_tx = 0; | |
0832b25a | 1699 | setup_hw_rings(dev, NV_SETUP_TX_RING); |
1da177e4 LT |
1700 | netif_wake_queue(dev); |
1701 | } | |
1702 | ||
1703 | /* 4) restart tx engine */ | |
1704 | nv_start_tx(dev); | |
1705 | spin_unlock_irq(&np->lock); | |
1706 | } | |
1707 | ||
22c6d143 MS |
1708 | /* |
1709 | * Called when the nic notices a mismatch between the actual data len on the | |
1710 | * wire and the len indicated in the 802 header | |
1711 | */ | |
1712 | static int nv_getlen(struct net_device *dev, void *packet, int datalen) | |
1713 | { | |
1714 | int hdrlen; /* length of the 802 header */ | |
1715 | int protolen; /* length as stored in the proto field */ | |
1716 | ||
1717 | /* 1) calculate len according to header */ | |
f82a9352 | 1718 | if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { |
22c6d143 MS |
1719 | protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); |
1720 | hdrlen = VLAN_HLEN; | |
1721 | } else { | |
1722 | protolen = ntohs( ((struct ethhdr *)packet)->h_proto); | |
1723 | hdrlen = ETH_HLEN; | |
1724 | } | |
1725 | dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n", | |
1726 | dev->name, datalen, protolen, hdrlen); | |
1727 | if (protolen > ETH_DATA_LEN) | |
1728 | return datalen; /* Value in proto field not a len, no checks possible */ | |
1729 | ||
1730 | protolen += hdrlen; | |
1731 | /* consistency checks: */ | |
1732 | if (datalen > ETH_ZLEN) { | |
1733 | if (datalen >= protolen) { | |
1734 | /* more data on wire than in 802 header, trim off |
1735 | * additional data. | |
1736 | */ | |
1737 | dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", | |
1738 | dev->name, protolen); | |
1739 | return protolen; | |
1740 | } else { | |
1741 | /* less data on wire than mentioned in header. | |
1742 | * Discard the packet. | |
1743 | */ | |
1744 | dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n", | |
1745 | dev->name); | |
1746 | return -1; | |
1747 | } | |
1748 | } else { | |
1749 | /* short packet. Accept only if 802 values are also short */ | |
1750 | if (protolen > ETH_ZLEN) { | |
1751 | dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n", | |
1752 | dev->name); | |
1753 | return -1; | |
1754 | } | |
1755 | dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n", | |
1756 | dev->name, datalen); | |
1757 | return datalen; | |
1758 | } | |
1759 | } | |
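/* [Editor's note, added for clarity - not in the driver source]
 * Worked example with illustrative values: a 64-byte frame whose 802.3
 * length field reads 46 gives protolen = 46 + ETH_HLEN = 60 <= datalen,
 * so the frame is trimmed to 60 bytes; a field of 0x0800 (> ETH_DATA_LEN)
 * is an EtherType rather than a length, so datalen is returned unchanged.
 */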
1760 | ||
e27cdba5 | 1761 | static int nv_rx_process(struct net_device *dev, int limit) |
1da177e4 | 1762 | { |
ac9c1897 | 1763 | struct fe_priv *np = netdev_priv(dev); |
f82a9352 | 1764 | u32 flags; |
ee407b02 | 1765 | u32 vlanflags = 0; |
e27cdba5 | 1766 | int count; |
ee407b02 | 1767 | |
e27cdba5 | 1768 | for (count = 0; count < limit; ++count) { |
1da177e4 LT |
1769 | struct sk_buff *skb; |
1770 | int len; | |
1771 | int i; | |
eafa59f6 | 1772 | if (np->cur_rx - np->refill_rx >= np->rx_ring_size) |
1da177e4 LT |
1773 | break; /* we scanned the whole ring - do not continue */ |
1774 | ||
eafa59f6 | 1775 | i = np->cur_rx % np->rx_ring_size; |
ee73362c | 1776 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { |
f82a9352 | 1777 | flags = le32_to_cpu(np->rx_ring.orig[i].flaglen); |
ee73362c MS |
1778 | len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); |
1779 | } else { | |
f82a9352 | 1780 | flags = le32_to_cpu(np->rx_ring.ex[i].flaglen); |
ee73362c | 1781 | len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); |
f82a9352 | 1782 | vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow); |
ee73362c | 1783 | } |
1da177e4 | 1784 | |
f82a9352 SH |
1785 | dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n", |
1786 | dev->name, np->cur_rx, flags); | |
1da177e4 | 1787 | |
f82a9352 | 1788 | if (flags & NV_RX_AVAIL) |
1da177e4 LT |
1789 | break; /* still owned by hardware */ |
1790 | ||
1791 | /* | |
1792 | * the packet is for us - immediately tear down the pci mapping. | |
1793 | * TODO: check if a prefetch of the first cacheline improves | |
1794 | * the performance. | |
1795 | */ | |
1796 | pci_unmap_single(np->pci_dev, np->rx_dma[i], | |
1836098f | 1797 | np->rx_skbuff[i]->end-np->rx_skbuff[i]->data, |
1da177e4 LT |
1798 | PCI_DMA_FROMDEVICE); |
1799 | ||
1800 | { | |
1801 | int j; | |
f82a9352 | 1802 | dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags); |
1da177e4 LT |
1803 | for (j=0; j<64; j++) { |
1804 | if ((j%16) == 0) | |
1805 | dprintk("\n%03x:", j); | |
1806 | dprintk(" %02x", ((unsigned char*)np->rx_skbuff[i]->data)[j]); | |
1807 | } | |
1808 | dprintk("\n"); | |
1809 | } | |
1810 | /* look at what we actually got: */ | |
1811 | if (np->desc_ver == DESC_VER_1) { | |
f82a9352 | 1812 | if (!(flags & NV_RX_DESCRIPTORVALID)) |
1da177e4 LT |
1813 | goto next_pkt; |
1814 | ||
f82a9352 SH |
1815 | if (flags & NV_RX_ERROR) { |
1816 | if (flags & NV_RX_MISSEDFRAME) { | |
a971c324 | 1817 | np->stats.rx_missed_errors++; |
1da177e4 LT |
1818 | np->stats.rx_errors++; |
1819 | goto next_pkt; | |
1820 | } | |
f82a9352 | 1821 | if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { |
a971c324 AA |
1822 | np->stats.rx_errors++; |
1823 | goto next_pkt; | |
1824 | } | |
f82a9352 | 1825 | if (flags & NV_RX_CRCERR) { |
a971c324 AA |
1826 | np->stats.rx_crc_errors++; |
1827 | np->stats.rx_errors++; | |
1828 | goto next_pkt; | |
1829 | } | |
f82a9352 | 1830 | if (flags & NV_RX_OVERFLOW) { |
a971c324 AA |
1831 | np->stats.rx_over_errors++; |
1832 | np->stats.rx_errors++; | |
1833 | goto next_pkt; | |
1834 | } | |
f82a9352 | 1835 | if (flags & NV_RX_ERROR4) { |
a971c324 AA |
1836 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); |
1837 | if (len < 0) { | |
1838 | np->stats.rx_errors++; | |
1839 | goto next_pkt; | |
1840 | } | |
1841 | } | |
1842 | /* framing errors are soft errors. */ | |
f82a9352 SH |
1843 | if (flags & NV_RX_FRAMINGERR) { |
1844 | if (flags & NV_RX_SUBSTRACT1) { | |
a971c324 AA |
1845 | len--; |
1846 | } | |
22c6d143 MS |
1847 | } |
1848 | } | |
1da177e4 | 1849 | } else { |
f82a9352 | 1850 | if (!(flags & NV_RX2_DESCRIPTORVALID)) |
1da177e4 LT |
1851 | goto next_pkt; |
1852 | ||
f82a9352 SH |
1853 | if (flags & NV_RX2_ERROR) { |
1854 | if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { | |
1da177e4 LT |
1855 | np->stats.rx_errors++; |
1856 | goto next_pkt; | |
1857 | } | |
f82a9352 | 1858 | if (flags & NV_RX2_CRCERR) { |
a971c324 AA |
1859 | np->stats.rx_crc_errors++; |
1860 | np->stats.rx_errors++; | |
1861 | goto next_pkt; | |
1862 | } | |
f82a9352 | 1863 | if (flags & NV_RX2_OVERFLOW) { |
a971c324 AA |
1864 | np->stats.rx_over_errors++; |
1865 | np->stats.rx_errors++; | |
1866 | goto next_pkt; | |
1867 | } | |
f82a9352 | 1868 | if (flags & NV_RX2_ERROR4) { |
a971c324 AA |
1869 | len = nv_getlen(dev, np->rx_skbuff[i]->data, len); |
1870 | if (len < 0) { | |
1871 | np->stats.rx_errors++; | |
1872 | goto next_pkt; | |
1873 | } | |
1874 | } | |
1875 | /* framing errors are soft errors */ | |
f82a9352 SH |
1876 | if (flags & NV_RX2_FRAMINGERR) { |
1877 | if (flags & NV_RX2_SUBSTRACT1) { | |
a971c324 AA |
1878 | len--; |
1879 | } | |
22c6d143 MS |
1880 | } |
1881 | } | |
5ed2616f | 1882 | if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) { |
f82a9352 SH |
1883 | flags &= NV_RX2_CHECKSUMMASK; |
1884 | if (flags == NV_RX2_CHECKSUMOK1 || | |
1885 | flags == NV_RX2_CHECKSUMOK2 || | |
1886 | flags == NV_RX2_CHECKSUMOK3) { | |
5ed2616f AA |
1887 | dprintk(KERN_DEBUG "%s: hw checksum hit!\n", dev->name); |
1888 | np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; | |
1889 | } else { | |
1890 | dprintk(KERN_DEBUG "%s: hw checksum miss!\n", dev->name); |
1891 | } | |
1da177e4 LT |
1892 | } |
1893 | } | |
1894 | /* got a valid packet - forward it to the network core */ | |
1895 | skb = np->rx_skbuff[i]; | |
1896 | np->rx_skbuff[i] = NULL; | |
1897 | ||
1898 | skb_put(skb, len); | |
1899 | skb->protocol = eth_type_trans(skb, dev); | |
1900 | dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", | |
1901 | dev->name, np->cur_rx, len, skb->protocol); | |
e27cdba5 SH |
1902 | #ifdef CONFIG_FORCEDETH_NAPI |
1903 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) | |
1904 | vlan_hwaccel_receive_skb(skb, np->vlangrp, | |
1905 | vlanflags & NV_RX3_VLAN_TAG_MASK); | |
1906 | else | |
1907 | netif_receive_skb(skb); | |
1908 | #else | |
1909 | if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) | |
1910 | vlan_hwaccel_rx(skb, np->vlangrp, | |
1911 | vlanflags & NV_RX3_VLAN_TAG_MASK); | |
1912 | else | |
ee407b02 | 1913 | netif_rx(skb); |
e27cdba5 | 1914 | #endif |
1da177e4 LT |
1915 | dev->last_rx = jiffies; |
1916 | np->stats.rx_packets++; | |
1917 | np->stats.rx_bytes += len; | |
1918 | next_pkt: | |
1919 | np->cur_rx++; | |
1920 | } | |
e27cdba5 SH |
1921 | |
1922 | return count; | |
1da177e4 LT |
1923 | } |
1924 | ||
d81c0983 MS |
1925 | static void set_bufsize(struct net_device *dev) |
1926 | { | |
1927 | struct fe_priv *np = netdev_priv(dev); | |
1928 | ||
1929 | if (dev->mtu <= ETH_DATA_LEN) | |
1930 | np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; | |
1931 | else | |
1932 | np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; | |
1933 | } | |
1934 | ||
1da177e4 LT |
1935 | /* |
1936 | * nv_change_mtu: dev->change_mtu function | |
1937 | * Called with dev_base_lock held for read. | |
1938 | */ | |
1939 | static int nv_change_mtu(struct net_device *dev, int new_mtu) | |
1940 | { | |
ac9c1897 | 1941 | struct fe_priv *np = netdev_priv(dev); |
d81c0983 MS |
1942 | int old_mtu; |
1943 | ||
1944 | if (new_mtu < 64 || new_mtu > np->pkt_limit) | |
1da177e4 | 1945 | return -EINVAL; |
d81c0983 MS |
1946 | |
1947 | old_mtu = dev->mtu; | |
1da177e4 | 1948 | dev->mtu = new_mtu; |
d81c0983 MS |
1949 | |
1950 | /* return early if the buffer sizes will not change */ | |
1951 | if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) | |
1952 | return 0; | |
1953 | if (old_mtu == new_mtu) | |
1954 | return 0; | |
1955 | ||
1956 | /* synchronized against open : rtnl_lock() held by caller */ | |
1957 | if (netif_running(dev)) { | |
25097d4b | 1958 | u8 __iomem *base = get_hwbase(dev); |
d81c0983 MS |
1959 | /* |
1960 | * It seems that the nic preloads valid ring entries into an | |
1961 | * internal buffer. The procedure for flushing everything is | |
1962 | * guessed; there is probably a simpler approach. |
1963 | * Changing the MTU is a rare event, so it shouldn't matter. |
1964 | */ | |
84b3932b | 1965 | nv_disable_irq(dev); |
932ff279 | 1966 | netif_tx_lock_bh(dev); |
d81c0983 MS |
1967 | spin_lock(&np->lock); |
1968 | /* stop engines */ | |
1969 | nv_stop_rx(dev); | |
1970 | nv_stop_tx(dev); | |
1971 | nv_txrx_reset(dev); | |
1972 | /* drain rx queue */ | |
1973 | nv_drain_rx(dev); | |
1974 | nv_drain_tx(dev); | |
1975 | /* reinit driver view of the rx queue */ | |
d81c0983 | 1976 | set_bufsize(dev); |
eafa59f6 | 1977 | if (nv_init_ring(dev)) { |
d81c0983 MS |
1978 | if (!np->in_shutdown) |
1979 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
1980 | } | |
1981 | /* reinit nic view of the rx queue */ | |
1982 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
0832b25a | 1983 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
eafa59f6 | 1984 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
d81c0983 MS |
1985 | base + NvRegRingSizes); |
1986 | pci_push(base); | |
8a4ae7f2 | 1987 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); |
d81c0983 MS |
1988 | pci_push(base); |
1989 | ||
1990 | /* restart rx engine */ | |
1991 | nv_start_rx(dev); | |
1992 | nv_start_tx(dev); | |
1993 | spin_unlock(&np->lock); | |
932ff279 | 1994 | netif_tx_unlock_bh(dev); |
84b3932b | 1995 | nv_enable_irq(dev); |
d81c0983 | 1996 | } |
1da177e4 LT |
1997 | return 0; |
1998 | } | |
1999 | ||
72b31782 MS |
2000 | static void nv_copy_mac_to_hw(struct net_device *dev) |
2001 | { | |
25097d4b | 2002 | u8 __iomem *base = get_hwbase(dev); |
72b31782 MS |
2003 | u32 mac[2]; |
2004 | ||
2005 | mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + | |
2006 | (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24); | |
2007 | mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8); | |
2008 | ||
2009 | writel(mac[0], base + NvRegMacAddrA); | |
2010 | writel(mac[1], base + NvRegMacAddrB); | |
2011 | } | |
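/* [Editor's note, added for clarity - not in the driver source]
 * The bytes are packed little-endian: dev_addr 00:11:22:33:44:55 would
 * be written as NvRegMacAddrA = 0x33221100 and NvRegMacAddrB = 0x00005544.
 */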
2012 | ||
2013 | /* | |
2014 | * nv_set_mac_address: dev->set_mac_address function | |
2015 | * Called with rtnl_lock() held. | |
2016 | */ | |
2017 | static int nv_set_mac_address(struct net_device *dev, void *addr) | |
2018 | { | |
ac9c1897 | 2019 | struct fe_priv *np = netdev_priv(dev); |
72b31782 MS |
2020 | struct sockaddr *macaddr = (struct sockaddr*)addr; |
2021 | ||
f82a9352 | 2022 | if (!is_valid_ether_addr(macaddr->sa_data)) |
72b31782 MS |
2023 | return -EADDRNOTAVAIL; |
2024 | ||
2025 | /* synchronized against open : rtnl_lock() held by caller */ | |
2026 | memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN); | |
2027 | ||
2028 | if (netif_running(dev)) { | |
932ff279 | 2029 | netif_tx_lock_bh(dev); |
72b31782 MS |
2030 | spin_lock_irq(&np->lock); |
2031 | ||
2032 | /* stop rx engine */ | |
2033 | nv_stop_rx(dev); | |
2034 | ||
2035 | /* set mac address */ | |
2036 | nv_copy_mac_to_hw(dev); | |
2037 | ||
2038 | /* restart rx engine */ | |
2039 | nv_start_rx(dev); | |
2040 | spin_unlock_irq(&np->lock); | |
932ff279 | 2041 | netif_tx_unlock_bh(dev); |
72b31782 MS |
2042 | } else { |
2043 | nv_copy_mac_to_hw(dev); | |
2044 | } | |
2045 | return 0; | |
2046 | } | |
2047 | ||
1da177e4 LT |
2048 | /* |
2049 | * nv_set_multicast: dev->set_multicast function | |
932ff279 | 2050 | * Called with netif_tx_lock held. |
1da177e4 LT |
2051 | */ |
2052 | static void nv_set_multicast(struct net_device *dev) | |
2053 | { | |
ac9c1897 | 2054 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2055 | u8 __iomem *base = get_hwbase(dev); |
2056 | u32 addr[2]; | |
2057 | u32 mask[2]; | |
b6d0773f | 2058 | u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX; |
1da177e4 LT |
2059 | |
2060 | memset(addr, 0, sizeof(addr)); | |
2061 | memset(mask, 0, sizeof(mask)); | |
2062 | ||
2063 | if (dev->flags & IFF_PROMISC) { | |
2064 | printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); | |
b6d0773f | 2065 | pff |= NVREG_PFF_PROMISC; |
1da177e4 | 2066 | } else { |
b6d0773f | 2067 | pff |= NVREG_PFF_MYADDR; |
1da177e4 LT |
2068 | |
2069 | if (dev->flags & IFF_ALLMULTI || dev->mc_list) { | |
2070 | u32 alwaysOff[2]; | |
2071 | u32 alwaysOn[2]; | |
2072 | ||
2073 | alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff; | |
2074 | if (dev->flags & IFF_ALLMULTI) { | |
2075 | alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; | |
2076 | } else { | |
2077 | struct dev_mc_list *walk; | |
2078 | ||
2079 | walk = dev->mc_list; | |
2080 | while (walk != NULL) { | |
2081 | u32 a, b; | |
2082 | a = le32_to_cpu(*(u32 *) walk->dmi_addr); | |
2083 | b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4])); | |
2084 | alwaysOn[0] &= a; | |
2085 | alwaysOff[0] &= ~a; | |
2086 | alwaysOn[1] &= b; | |
2087 | alwaysOff[1] &= ~b; | |
2088 | walk = walk->next; | |
2089 | } | |
2090 | } | |
2091 | addr[0] = alwaysOn[0]; | |
2092 | addr[1] = alwaysOn[1]; | |
2093 | mask[0] = alwaysOn[0] | alwaysOff[0]; | |
2094 | mask[1] = alwaysOn[1] | alwaysOff[1]; | |
2095 | } | |
2096 | } | |
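/* [Editor's note, added for clarity - not in the driver source]
 * alwaysOn accumulates the bits set in every multicast address and
 * alwaysOff the bits clear in every one, so addr/mask describe only the
 * bits common to all addresses and the hardware accepts a superset of
 * the subscribed groups. Illustrative example: 01:00:5e:00:00:01 and
 * 01:00:5e:00:00:03 differ only in bit 1 of the last octet, so that bit
 * drops out of the mask and becomes don't-care.
 */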
2097 | addr[0] |= NVREG_MCASTADDRA_FORCE; | |
2098 | pff |= NVREG_PFF_ALWAYS; | |
2099 | spin_lock_irq(&np->lock); | |
2100 | nv_stop_rx(dev); | |
2101 | writel(addr[0], base + NvRegMulticastAddrA); | |
2102 | writel(addr[1], base + NvRegMulticastAddrB); | |
2103 | writel(mask[0], base + NvRegMulticastMaskA); | |
2104 | writel(mask[1], base + NvRegMulticastMaskB); | |
2105 | writel(pff, base + NvRegPacketFilterFlags); | |
2106 | dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n", | |
2107 | dev->name); | |
2108 | nv_start_rx(dev); | |
2109 | spin_unlock_irq(&np->lock); | |
2110 | } | |
2111 | ||
c7985051 | 2112 | static void nv_update_pause(struct net_device *dev, u32 pause_flags) |
b6d0773f AA |
2113 | { |
2114 | struct fe_priv *np = netdev_priv(dev); | |
2115 | u8 __iomem *base = get_hwbase(dev); | |
2116 | ||
2117 | np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); | |
2118 | ||
2119 | if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { | |
2120 | u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX; | |
2121 | if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) { | |
2122 | writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags); | |
2123 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2124 | } else { | |
2125 | writel(pff, base + NvRegPacketFilterFlags); | |
2126 | } | |
2127 | } | |
2128 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { | |
2129 | u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; | |
2130 | if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { | |
2131 | writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); | |
2132 | writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); | |
2133 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2134 | } else { | |
2135 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | |
2136 | writel(regmisc, base + NvRegMisc1); | |
2137 | } | |
2138 | } | |
2139 | } | |
2140 | ||
4ea7f299 AA |
2141 | /** |
2142 | * nv_update_linkspeed: Setup the MAC according to the link partner | |
2143 | * @dev: Network device to be configured | |
2144 | * | |
2145 | * The function queries the PHY and checks if there is a link partner. | |
2146 | * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is | |
2147 | * set to 10 MBit HD. | |
2148 | * | |
2149 | * The function returns 0 if there is no link partner and 1 if there is | |
2150 | * a good link partner. | |
2151 | */ | |
1da177e4 LT |
2152 | static int nv_update_linkspeed(struct net_device *dev) |
2153 | { | |
ac9c1897 | 2154 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 2155 | u8 __iomem *base = get_hwbase(dev); |
eb91f61b AA |
2156 | int adv = 0; |
2157 | int lpa = 0; | |
2158 | int adv_lpa, adv_pause, lpa_pause; | |
1da177e4 LT |
2159 | int newls = np->linkspeed; |
2160 | int newdup = np->duplex; | |
2161 | int mii_status; | |
2162 | int retval = 0; | |
9744e218 | 2163 | u32 control_1000, status_1000, phyreg, pause_flags, txreg; |
1da177e4 LT |
2164 | |
2165 | /* BMSR_LSTATUS is latched, read it twice: | |
2166 | * we want the current value. | |
2167 | */ | |
2168 | mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
2169 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
2170 | ||
2171 | if (!(mii_status & BMSR_LSTATUS)) { | |
2172 | dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n", | |
2173 | dev->name); | |
2174 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2175 | newdup = 0; | |
2176 | retval = 0; | |
2177 | goto set_speed; | |
2178 | } | |
2179 | ||
2180 | if (np->autoneg == 0) { | |
2181 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n", | |
2182 | dev->name, np->fixed_mode); | |
2183 | if (np->fixed_mode & LPA_100FULL) { | |
2184 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | |
2185 | newdup = 1; | |
2186 | } else if (np->fixed_mode & LPA_100HALF) { | |
2187 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; | |
2188 | newdup = 0; | |
2189 | } else if (np->fixed_mode & LPA_10FULL) { | |
2190 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2191 | newdup = 1; | |
2192 | } else { | |
2193 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2194 | newdup = 0; | |
2195 | } | |
2196 | retval = 1; | |
2197 | goto set_speed; | |
2198 | } | |
2199 | /* check auto negotiation is complete */ | |
2200 | if (!(mii_status & BMSR_ANEGCOMPLETE)) { | |
2201 | /* still in autonegotiation - configure nic for 10 MBit HD and wait. */ | |
2202 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; | |
2203 | newdup = 0; | |
2204 | retval = 0; | |
2205 | dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name); | |
2206 | goto set_speed; | |
2207 | } | |
2208 | ||
b6d0773f AA |
2209 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); |
2210 | lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); | |
2211 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n", | |
2212 | dev->name, adv, lpa); | |
2213 | ||
1da177e4 LT |
2214 | retval = 1; |
2215 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b AA |
2216 | control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
2217 | status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); | |
1da177e4 LT |
2218 | |
2219 | if ((control_1000 & ADVERTISE_1000FULL) && | |
2220 | (status_1000 & LPA_1000FULL)) { | |
2221 | dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n", | |
2222 | dev->name); | |
2223 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; | |
2224 | newdup = 1; | |
2225 | goto set_speed; | |
2226 | } | |
2227 | } | |
2228 | ||
1da177e4 | 2229 | /* FIXME: handle parallel detection properly */ |
eb91f61b AA |
2230 | adv_lpa = lpa & adv; |
2231 | if (adv_lpa & LPA_100FULL) { | |
1da177e4 LT |
2232 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
2233 | newdup = 1; | |
eb91f61b | 2234 | } else if (adv_lpa & LPA_100HALF) { |
1da177e4 LT |
2235 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; |
2236 | newdup = 0; | |
eb91f61b | 2237 | } else if (adv_lpa & LPA_10FULL) { |
1da177e4 LT |
2238 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2239 | newdup = 1; | |
eb91f61b | 2240 | } else if (adv_lpa & LPA_10HALF) { |
1da177e4 LT |
2241 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2242 | newdup = 0; | |
2243 | } else { | |
eb91f61b | 2244 | dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa); |
1da177e4 LT |
2245 | newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; |
2246 | newdup = 0; | |
2247 | } | |
2248 | ||
2249 | set_speed: | |
2250 | if (np->duplex == newdup && np->linkspeed == newls) | |
2251 | return retval; | |
2252 | ||
2253 | dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n", | |
2254 | dev->name, np->linkspeed, np->duplex, newls, newdup); | |
2255 | ||
2256 | np->duplex = newdup; | |
2257 | np->linkspeed = newls; | |
2258 | ||
2259 | if (np->gigabit == PHY_GIGABIT) { | |
2260 | phyreg = readl(base + NvRegRandomSeed); | |
2261 | phyreg &= ~(0x3FF00); | |
2262 | if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) | |
2263 | phyreg |= NVREG_RNDSEED_FORCE3; | |
2264 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) | |
2265 | phyreg |= NVREG_RNDSEED_FORCE2; | |
2266 | else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) | |
2267 | phyreg |= NVREG_RNDSEED_FORCE; | |
2268 | writel(phyreg, base + NvRegRandomSeed); | |
2269 | } | |
2270 | ||
2271 | phyreg = readl(base + NvRegPhyInterface); | |
2272 | phyreg &= ~(PHY_HALF|PHY_100|PHY_1000); | |
2273 | if (np->duplex == 0) | |
2274 | phyreg |= PHY_HALF; | |
2275 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) | |
2276 | phyreg |= PHY_100; | |
2277 | else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | |
2278 | phyreg |= PHY_1000; | |
2279 | writel(phyreg, base + NvRegPhyInterface); | |
2280 | ||
9744e218 AA |
2281 | if (phyreg & PHY_RGMII) { |
2282 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | |
2283 | txreg = NVREG_TX_DEFERRAL_RGMII_1000; | |
2284 | else | |
2285 | txreg = NVREG_TX_DEFERRAL_RGMII_10_100; | |
2286 | } else { | |
2287 | txreg = NVREG_TX_DEFERRAL_DEFAULT; | |
2288 | } | |
2289 | writel(txreg, base + NvRegTxDeferral); | |
2290 | ||
95d161cb AA |
2291 | if (np->desc_ver == DESC_VER_1) { |
2292 | txreg = NVREG_TX_WM_DESC1_DEFAULT; | |
2293 | } else { | |
2294 | if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) | |
2295 | txreg = NVREG_TX_WM_DESC2_3_1000; | |
2296 | else | |
2297 | txreg = NVREG_TX_WM_DESC2_3_DEFAULT; | |
2298 | } | |
2299 | writel(txreg, base + NvRegTxWatermark); | |
2300 | ||
1da177e4 LT |
2301 | writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), |
2302 | base + NvRegMisc1); | |
2303 | pci_push(base); | |
2304 | writel(np->linkspeed, base + NvRegLinkSpeed); | |
2305 | pci_push(base); | |
2306 | ||
b6d0773f AA |
2307 | pause_flags = 0; |
2308 | /* setup pause frame */ | |
eb91f61b | 2309 | if (np->duplex != 0) { |
b6d0773f AA |
2310 | if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { |
2311 | adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); | |
2312 | lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); | |
2313 | ||
2314 | switch (adv_pause) { | |
f82a9352 | 2315 | case ADVERTISE_PAUSE_CAP: |
b6d0773f AA |
2316 | if (lpa_pause & LPA_PAUSE_CAP) { |
2317 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2318 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
2319 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2320 | } | |
2321 | break; | |
f82a9352 | 2322 | case ADVERTISE_PAUSE_ASYM: |
b6d0773f AA |
2323 | if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) |
2324 | { | |
2325 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2326 | } | |
2327 | break; | |
f82a9352 | 2328 | case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: |
b6d0773f AA |
2329 | if (lpa_pause & LPA_PAUSE_CAP) |
2330 | { | |
2331 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2332 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
2333 | pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
2334 | } | |
2335 | if (lpa_pause == LPA_PAUSE_ASYM) | |
2336 | { | |
2337 | pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
2338 | } | |
2339 | break; | |
f3b197ac | 2340 | } |
eb91f61b | 2341 | } else { |
b6d0773f | 2342 | pause_flags = np->pause_flags; |
eb91f61b AA |
2343 | } |
2344 | } | |
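/* [Editor's note, added for clarity - not in the driver source]
 * The switch above resolves pause use from the advertised abilities.
 * One illustrative case: if we advertise symmetric and asymmetric pause
 * (ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM) while the partner
 * advertises asymmetric pause only, just NV_PAUSEFRAME_RX_ENABLE is set.
 */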
b6d0773f | 2345 | nv_update_pause(dev, pause_flags); |
eb91f61b | 2346 | |
1da177e4 LT |
2347 | return retval; |
2348 | } | |
2349 | ||
2350 | static void nv_linkchange(struct net_device *dev) | |
2351 | { | |
2352 | if (nv_update_linkspeed(dev)) { | |
4ea7f299 | 2353 | if (!netif_carrier_ok(dev)) { |
1da177e4 LT |
2354 | netif_carrier_on(dev); |
2355 | printk(KERN_INFO "%s: link up.\n", dev->name); | |
4ea7f299 | 2356 | nv_start_rx(dev); |
1da177e4 | 2357 | } |
1da177e4 LT |
2358 | } else { |
2359 | if (netif_carrier_ok(dev)) { | |
2360 | netif_carrier_off(dev); | |
2361 | printk(KERN_INFO "%s: link down.\n", dev->name); | |
2362 | nv_stop_rx(dev); | |
2363 | } | |
2364 | } | |
2365 | } | |
2366 | ||
2367 | static void nv_link_irq(struct net_device *dev) | |
2368 | { | |
2369 | u8 __iomem *base = get_hwbase(dev); | |
2370 | u32 miistat; | |
2371 | ||
2372 | miistat = readl(base + NvRegMIIStatus); | |
2373 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
2374 | dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat); | |
2375 | ||
2376 | if (miistat & (NVREG_MIISTAT_LINKCHANGE)) | |
2377 | nv_linkchange(dev); | |
2378 | dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name); | |
2379 | } | |
2380 | ||
2381 | static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) | |
2382 | { | |
2383 | struct net_device *dev = (struct net_device *) data; | |
ac9c1897 | 2384 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2385 | u8 __iomem *base = get_hwbase(dev); |
2386 | u32 events; | |
2387 | int i; | |
2388 | ||
2389 | dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); | |
2390 | ||
2391 | for (i=0; ; i++) { | |
d33a73c8 AA |
2392 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
2393 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
2394 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
2395 | } else { | |
2396 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
2397 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | |
2398 | } | |
1da177e4 LT |
2399 | pci_push(base); |
2400 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | |
2401 | if (!(events & np->irqmask)) | |
2402 | break; | |
2403 | ||
a971c324 AA |
2404 | spin_lock(&np->lock); |
2405 | nv_tx_done(dev); | |
2406 | spin_unlock(&np->lock); | |
f3b197ac | 2407 | |
1da177e4 LT |
2408 | if (events & NVREG_IRQ_LINK) { |
2409 | spin_lock(&np->lock); | |
2410 | nv_link_irq(dev); | |
2411 | spin_unlock(&np->lock); | |
2412 | } | |
2413 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | |
2414 | spin_lock(&np->lock); | |
2415 | nv_linkchange(dev); | |
2416 | spin_unlock(&np->lock); | |
2417 | np->link_timeout = jiffies + LINK_TIMEOUT; | |
2418 | } | |
2419 | if (events & (NVREG_IRQ_TX_ERR)) { | |
2420 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | |
2421 | dev->name, events); | |
2422 | } | |
2423 | if (events & (NVREG_IRQ_UNKNOWN)) { | |
2424 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | |
2425 | dev->name, events); | |
2426 | } | |
e27cdba5 SH |
2427 | #ifdef CONFIG_FORCEDETH_NAPI |
2428 | if (events & NVREG_IRQ_RX_ALL) { | |
2429 | netif_rx_schedule(dev); | |
2430 | ||
2431 | /* Disable further receive irqs */ |
2432 | spin_lock(&np->lock); | |
2433 | np->irqmask &= ~NVREG_IRQ_RX_ALL; | |
2434 | ||
2435 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
2436 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
2437 | else | |
2438 | writel(np->irqmask, base + NvRegIrqMask); | |
2439 | spin_unlock(&np->lock); | |
2440 | } | |
2441 | #else | |
2442 | nv_rx_process(dev, dev->weight); | |
2443 | if (nv_alloc_rx(dev)) { | |
2444 | spin_lock(&np->lock); | |
2445 | if (!np->in_shutdown) | |
2446 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
2447 | spin_unlock(&np->lock); | |
2448 | } | |
2449 | #endif | |
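/* [Editor's note, added for clarity - not in the driver source]
 * The NAPI branch above is the usual interrupt-mitigation hand-off:
 * mask the rx interrupt sources, schedule the poll, and let
 * nv_napi_poll() re-enable them once the ring has been drained.
 */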
1da177e4 LT |
2450 | if (i > max_interrupt_work) { |
2451 | spin_lock(&np->lock); | |
2452 | /* disable interrupts on the nic */ | |
d33a73c8 AA |
2453 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) |
2454 | writel(0, base + NvRegIrqMask); | |
2455 | else | |
2456 | writel(np->irqmask, base + NvRegIrqMask); | |
1da177e4 LT |
2457 | pci_push(base); |
2458 | ||
d33a73c8 AA |
2459 | if (!np->in_shutdown) { |
2460 | np->nic_poll_irq = np->irqmask; | |
1da177e4 | 2461 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); |
d33a73c8 | 2462 | } |
1da177e4 LT |
2463 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); |
2464 | spin_unlock(&np->lock); | |
2465 | break; | |
2466 | } | |
2467 | ||
2468 | } | |
2469 | dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); | |
2470 | ||
2471 | return IRQ_RETVAL(i); | |
2472 | } | |
2473 | ||
d33a73c8 AA |
2474 | static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs) |
2475 | { | |
2476 | struct net_device *dev = (struct net_device *) data; | |
2477 | struct fe_priv *np = netdev_priv(dev); | |
2478 | u8 __iomem *base = get_hwbase(dev); | |
2479 | u32 events; | |
2480 | int i; | |
2481 | ||
2482 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name); | |
2483 | ||
2484 | for (i=0; ; i++) { | |
2485 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; | |
2486 | writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); | |
2487 | pci_push(base); | |
2488 | dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events); | |
2489 | if (!(events & np->irqmask)) | |
2490 | break; | |
2491 | ||
84b3932b | 2492 | spin_lock_irq(&np->lock); |
d33a73c8 | 2493 | nv_tx_done(dev); |
84b3932b | 2494 | spin_unlock_irq(&np->lock); |
f3b197ac | 2495 | |
d33a73c8 AA |
2496 | if (events & (NVREG_IRQ_TX_ERR)) { |
2497 | dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", | |
2498 | dev->name, events); | |
2499 | } | |
2500 | if (i > max_interrupt_work) { | |
84b3932b | 2501 | spin_lock_irq(&np->lock); |
d33a73c8 AA |
2502 | /* disable interrupts on the nic */ |
2503 | writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); | |
2504 | pci_push(base); | |
2505 | ||
2506 | if (!np->in_shutdown) { | |
2507 | np->nic_poll_irq |= NVREG_IRQ_TX_ALL; | |
2508 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
2509 | } | |
2510 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); | |
84b3932b | 2511 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2512 | break; |
2513 | } | |
2514 | ||
2515 | } | |
2516 | dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name); | |
2517 | ||
2518 | return IRQ_RETVAL(i); | |
2519 | } | |
2520 | ||
e27cdba5 SH |
2521 | #ifdef CONFIG_FORCEDETH_NAPI |
2522 | static int nv_napi_poll(struct net_device *dev, int *budget) | |
2523 | { | |
2524 | int pkts, limit = min(*budget, dev->quota); | |
2525 | struct fe_priv *np = netdev_priv(dev); | |
2526 | u8 __iomem *base = get_hwbase(dev); | |
2527 | ||
2528 | pkts = nv_rx_process(dev, limit); | |
2529 | ||
2530 | if (nv_alloc_rx(dev)) { | |
2531 | spin_lock_irq(&np->lock); | |
2532 | if (!np->in_shutdown) | |
2533 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
2534 | spin_unlock_irq(&np->lock); | |
2535 | } | |
2536 | ||
2537 | if (pkts < limit) { | |
2538 | /* all done, no more packets present */ | |
2539 | netif_rx_complete(dev); | |
2540 | ||
2541 | /* re-enable receive interrupts */ | |
2542 | spin_lock_irq(&np->lock); | |
2543 | np->irqmask |= NVREG_IRQ_RX_ALL; | |
2544 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
2545 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
2546 | else | |
2547 | writel(np->irqmask, base + NvRegIrqMask); | |
2548 | spin_unlock_irq(&np->lock); | |
2549 | return 0; | |
2550 | } else { | |
2551 | /* used up our quantum, so reschedule */ | |
2552 | dev->quota -= pkts; | |
2553 | *budget -= pkts; | |
2554 | return 1; | |
2555 | } | |
2556 | } | |
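/* [Editor's note, added for clarity - not in the driver source]
 * In this era's NAPI API, *budget is the global softirq budget and
 * dev->quota the per-device share; returning 1 keeps the device on the
 * poll list, while returning 0 after netif_rx_complete() removes it
 * until the next rx interrupt.
 */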
2557 | #endif | |
2558 | ||
2559 | #ifdef CONFIG_FORCEDETH_NAPI | |
2560 | static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) | |
2561 | { | |
2562 | struct net_device *dev = (struct net_device *) data; | |
2563 | u8 __iomem *base = get_hwbase(dev); | |
2564 | u32 events; | |
2565 | ||
2566 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | |
2567 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | |
2568 | ||
2569 | if (events) { | |
2570 | netif_rx_schedule(dev); | |
2571 | /* disable receive interrupts on the nic */ | |
2572 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
2573 | pci_push(base); | |
2574 | } | |
2575 | return IRQ_HANDLED; | |
2576 | } | |
2577 | #else | |
d33a73c8 AA |
2578 | static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) |
2579 | { | |
2580 | struct net_device *dev = (struct net_device *) data; | |
2581 | struct fe_priv *np = netdev_priv(dev); | |
2582 | u8 __iomem *base = get_hwbase(dev); | |
2583 | u32 events; | |
2584 | int i; | |
2585 | ||
2586 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name); | |
2587 | ||
2588 | for (i=0; ; i++) { | |
2589 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | |
2590 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | |
2591 | pci_push(base); | |
2592 | dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events); | |
2593 | if (!(events & np->irqmask)) | |
2594 | break; | |
f3b197ac | 2595 | |
e27cdba5 | 2596 | nv_rx_process(dev, dev->weight); |
d33a73c8 | 2597 | if (nv_alloc_rx(dev)) { |
84b3932b | 2598 | spin_lock_irq(&np->lock); |
d33a73c8 AA |
2599 | if (!np->in_shutdown) |
2600 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
84b3932b | 2601 | spin_unlock_irq(&np->lock); |
d33a73c8 | 2602 | } |
f3b197ac | 2603 | |
d33a73c8 | 2604 | if (i > max_interrupt_work) { |
84b3932b | 2605 | spin_lock_irq(&np->lock); |
d33a73c8 AA |
2606 | /* disable interrupts on the nic */ |
2607 | writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); | |
2608 | pci_push(base); | |
2609 | ||
2610 | if (!np->in_shutdown) { | |
2611 | np->nic_poll_irq |= NVREG_IRQ_RX_ALL; | |
2612 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
2613 | } | |
2614 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); | |
84b3932b | 2615 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2616 | break; |
2617 | } | |
d33a73c8 AA |
2618 | } |
2619 | dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); | |
2620 | ||
2621 | return IRQ_RETVAL(i); | |
2622 | } | |
e27cdba5 | 2623 | #endif |
d33a73c8 AA |
2624 | |
2625 | static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) | |
2626 | { | |
2627 | struct net_device *dev = (struct net_device *) data; | |
2628 | struct fe_priv *np = netdev_priv(dev); | |
2629 | u8 __iomem *base = get_hwbase(dev); | |
2630 | u32 events; | |
2631 | int i; | |
2632 | ||
2633 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name); | |
2634 | ||
2635 | for (i=0; ; i++) { | |
2636 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; | |
2637 | writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); | |
2638 | pci_push(base); | |
2639 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | |
2640 | if (!(events & np->irqmask)) | |
2641 | break; | |
f3b197ac | 2642 | |
d33a73c8 | 2643 | if (events & NVREG_IRQ_LINK) { |
84b3932b | 2644 | spin_lock_irq(&np->lock); |
d33a73c8 | 2645 | nv_link_irq(dev); |
84b3932b | 2646 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2647 | } |
2648 | if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { | |
84b3932b | 2649 | spin_lock_irq(&np->lock); |
d33a73c8 | 2650 | nv_linkchange(dev); |
84b3932b | 2651 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2652 | np->link_timeout = jiffies + LINK_TIMEOUT; |
2653 | } | |
2654 | if (events & (NVREG_IRQ_UNKNOWN)) { | |
2655 | printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", | |
2656 | dev->name, events); | |
2657 | } | |
2658 | if (i > max_interrupt_work) { | |
84b3932b | 2659 | spin_lock_irq(&np->lock); |
d33a73c8 AA |
2660 | /* disable interrupts on the nic */ |
2661 | writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); | |
2662 | pci_push(base); | |
2663 | ||
2664 | if (!np->in_shutdown) { | |
2665 | np->nic_poll_irq |= NVREG_IRQ_OTHER; | |
2666 | mod_timer(&np->nic_poll, jiffies + POLL_WAIT); | |
2667 | } | |
2668 | printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); | |
84b3932b | 2669 | spin_unlock_irq(&np->lock); |
d33a73c8 AA |
2670 | break; |
2671 | } | |
2672 | ||
2673 | } | |
2674 | dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name); | |
2675 | ||
2676 | return IRQ_RETVAL(i); | |
2677 | } | |
2678 | ||
9589c77a AA |
2679 | static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs) |
2680 | { | |
2681 | struct net_device *dev = (struct net_device *) data; | |
2682 | struct fe_priv *np = netdev_priv(dev); | |
2683 | u8 __iomem *base = get_hwbase(dev); | |
2684 | u32 events; | |
2685 | ||
2686 | dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name); | |
2687 | ||
2688 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | |
2689 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | |
2690 | writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); | |
2691 | } else { | |
2692 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | |
2693 | writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); | |
2694 | } | |
2695 | pci_push(base); | |
2696 | dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events); | |
2697 | if (!(events & NVREG_IRQ_TIMER)) | |
2698 | return IRQ_RETVAL(0); | |
2699 | ||
2700 | spin_lock(&np->lock); | |
2701 | np->intr_test = 1; | |
2702 | spin_unlock(&np->lock); | |
2703 | ||
2704 | dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name); | |
2705 | ||
2706 | return IRQ_RETVAL(1); | |
2707 | } | |
2708 | ||
7a1854b7 AA |
2709 | static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask) |
2710 | { | |
2711 | u8 __iomem *base = get_hwbase(dev); | |
2712 | int i; | |
2713 | u32 msixmap = 0; | |
2714 | ||
2715 | /* Each interrupt bit can be mapped to an MSIX vector (4 bits). |
2716 | * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents | |
2717 | * the remaining 8 interrupts. | |
2718 | */ | |
2719 | for (i = 0; i < 8; i++) { | |
2720 | if ((irqmask >> i) & 0x1) { | |
2721 | msixmap |= vector << (i << 2); | |
2722 | } | |
2723 | } | |
2724 | writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); | |
2725 | ||
2726 | msixmap = 0; | |
2727 | for (i = 0; i < 8; i++) { | |
2728 | if ((irqmask >> (i + 8)) & 0x1) { | |
2729 | msixmap |= vector << (i << 2); | |
2730 | } | |
2731 | } | |
2732 | writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); | |
2733 | } | |
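/* [Editor's note, added for clarity - not in the driver source]
 * Worked example: vector = 2 with irqmask = 0x0005 sets nibbles 0 and 2
 * of NvRegMSIXMap0, i.e. msixmap = (2 << 0) | (2 << 8) = 0x00000202;
 * bits 8-15 of the irqmask land in NvRegMSIXMap1 the same way.
 */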
2734 | ||
9589c77a | 2735 | static int nv_request_irq(struct net_device *dev, int intr_test) |
7a1854b7 AA |
2736 | { |
2737 | struct fe_priv *np = get_nvpriv(dev); | |
2738 | u8 __iomem *base = get_hwbase(dev); | |
2739 | int ret = 1; | |
2740 | int i; | |
2741 | ||
2742 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | |
2743 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | |
2744 | np->msi_x_entry[i].entry = i; | |
2745 | } | |
2746 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | |
2747 | np->msi_flags |= NV_MSI_X_ENABLED; | |
9589c77a | 2748 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { |
7a1854b7 | 2749 | /* Request irq for rx handling */ |
1fb9df5d | 2750 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) { |
7a1854b7 AA |
2751 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); |
2752 | pci_disable_msix(np->pci_dev); | |
2753 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
2754 | goto out_err; | |
2755 | } | |
2756 | /* Request irq for tx handling */ | |
1fb9df5d | 2757 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) { |
7a1854b7 AA |
2758 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); |
2759 | pci_disable_msix(np->pci_dev); | |
2760 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
2761 | goto out_free_rx; | |
2762 | } | |
2763 | /* Request irq for link and timer handling */ | |
1fb9df5d | 2764 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) { |
7a1854b7 AA |
2765 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); |
2766 | pci_disable_msix(np->pci_dev); | |
2767 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
2768 | goto out_free_tx; | |
2769 | } | |
2770 | /* map interrupts to their respective vector */ | |
2771 | writel(0, base + NvRegMSIXMap0); | |
2772 | writel(0, base + NvRegMSIXMap1); | |
2773 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | |
2774 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | |
2775 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | |
2776 | } else { | |
2777 | /* Request irq for all interrupts */ | |
9589c77a | 2778 | if ((!intr_test && |
1fb9df5d | 2779 | request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || |
9589c77a | 2780 | (intr_test && |
1fb9df5d | 2781 | request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { |
7a1854b7 AA |
2782 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); |
2783 | pci_disable_msix(np->pci_dev); | |
2784 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
2785 | goto out_err; | |
2786 | } | |
2787 | ||
2788 | /* map interrupts to vector 0 */ | |
2789 | writel(0, base + NvRegMSIXMap0); | |
2790 | writel(0, base + NvRegMSIXMap1); | |
2791 | } | |
2792 | } | |
2793 | } | |
2794 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | |
2795 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | |
2796 | np->msi_flags |= NV_MSI_ENABLED; | |
1fb9df5d TG |
2797 | if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || |
2798 | (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) { | |
7a1854b7 AA |
2799 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); |
2800 | pci_disable_msi(np->pci_dev); | |
2801 | np->msi_flags &= ~NV_MSI_ENABLED; | |
2802 | goto out_err; | |
2803 | } | |
2804 | ||
2805 | /* map interrupts to vector 0 */ | |
2806 | writel(0, base + NvRegMSIMap0); | |
2807 | writel(0, base + NvRegMSIMap1); | |
2808 | /* enable msi vector 0 */ | |
2809 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | |
2810 | } | |
2811 | } | |
2812 | if (ret != 0) { | |
1fb9df5d TG |
2813 | if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, IRQF_SHARED, dev->name, dev) != 0) || |
2814 | (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, IRQF_SHARED, dev->name, dev) != 0)) | |
7a1854b7 | 2815 | goto out_err; |
9589c77a | 2816 | |
7a1854b7 AA |
2817 | } |
2818 | ||
2819 | return 0; | |
2820 | out_free_tx: | |
2821 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); | |
2822 | out_free_rx: | |
2823 | free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); | |
2824 | out_err: | |
2825 | return 1; | |
2826 | } | |
2827 | ||
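/*
 * Editor's note on nv_request_irq() above: it is a three-stage fallback
 * ladder -- MSI-X (per-source vectors in throughput mode, a single vector
 * otherwise), then MSI, then the legacy INTx line in np->pci_dev->irq.
 * The intr_test flag swaps in nv_nic_irq_test so the interrupt self-test
 * can watch np->intr_test being set from the ISR. Typical call site
 * (mirroring nv_open() below):
 *
 *     if (nv_request_irq(dev, 0))    // 0 = install production handlers
 *             goto out_drain;        // all three mechanisms failed
 */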
2828 | static void nv_free_irq(struct net_device *dev) | |
2829 | { | |
2830 | struct fe_priv *np = get_nvpriv(dev); | |
2831 | int i; | |
2832 | ||
2833 | if (np->msi_flags & NV_MSI_X_ENABLED) { | |
2834 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | |
2835 | free_irq(np->msi_x_entry[i].vector, dev); | |
2836 | } | |
2837 | pci_disable_msix(np->pci_dev); | |
2838 | np->msi_flags &= ~NV_MSI_X_ENABLED; | |
2839 | } else { | |
2840 | free_irq(np->pci_dev->irq, dev); | |
2841 | if (np->msi_flags & NV_MSI_ENABLED) { | |
2842 | pci_disable_msi(np->pci_dev); | |
2843 | np->msi_flags &= ~NV_MSI_ENABLED; | |
2844 | } | |
2845 | } | |
2846 | } | |
2847 | ||
1da177e4 LT |
2848 | static void nv_do_nic_poll(unsigned long data) |
2849 | { | |
2850 | struct net_device *dev = (struct net_device *) data; | |
ac9c1897 | 2851 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 2852 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 | 2853 | u32 mask = 0; |
1da177e4 | 2854 | |
1da177e4 | 2855 | /* |
d33a73c8 | 2856 | * First disable the cpu-side irq(s) and then |
1da177e4 LT |
2857 | * re-enable the interrupt mask on the nic; we have to do this before | |
2858 | * calling nv_nic_irq because that may decide to do otherwise. | |
2859 | */ | |
d33a73c8 | 2860 | |
84b3932b AA |
2861 | if (!using_multi_irqs(dev)) { |
2862 | if (np->msi_flags & NV_MSI_X_ENABLED) | |
8688cfce | 2863 | disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); |
84b3932b | 2864 | else |
8688cfce | 2865 | disable_irq_lockdep(dev->irq); |
d33a73c8 AA |
2866 | mask = np->irqmask; |
2867 | } else { | |
2868 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | |
8688cfce | 2869 | disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
d33a73c8 AA |
2870 | mask |= NVREG_IRQ_RX_ALL; |
2871 | } | |
2872 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | |
8688cfce | 2873 | disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); |
d33a73c8 AA |
2874 | mask |= NVREG_IRQ_TX_ALL; |
2875 | } | |
2876 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | |
8688cfce | 2877 | disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); |
d33a73c8 AA |
2878 | mask |= NVREG_IRQ_OTHER; |
2879 | } | |
2880 | } | |
2881 | np->nic_poll_irq = 0; | |
2882 | ||
2883 | /* FIXME: Do we need synchronize_irq(dev->irq) here? */ | |
f3b197ac | 2884 | |
d33a73c8 | 2885 | writel(mask, base + NvRegIrqMask); |
1da177e4 | 2886 | pci_push(base); |
d33a73c8 | 2887 | |
84b3932b | 2888 | if (!using_multi_irqs(dev)) { |
479ceddd | 2889 | nv_nic_irq(0, dev, NULL); |
84b3932b | 2890 | if (np->msi_flags & NV_MSI_X_ENABLED) |
8688cfce | 2891 | enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); |
84b3932b | 2892 | else |
8688cfce | 2893 | enable_irq_lockdep(dev->irq); |
d33a73c8 AA |
2894 | } else { |
2895 | if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { | |
479ceddd | 2896 | nv_nic_irq_rx(0, dev, NULL); |
8688cfce | 2897 | enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); |
d33a73c8 AA |
2898 | } |
2899 | if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { | |
479ceddd | 2900 | nv_nic_irq_tx(0, dev, NULL); |
8688cfce | 2901 | enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); |
d33a73c8 AA |
2902 | } |
2903 | if (np->nic_poll_irq & NVREG_IRQ_OTHER) { | |
479ceddd | 2904 | nv_nic_irq_other(0, dev, NULL); |
8688cfce | 2905 | enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); |
d33a73c8 AA |
2906 | } |
2907 | } | |
1da177e4 LT |
2908 | } |
2909 | ||
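/*
 * Editor's sketch of how the poll above gets armed (based on the irq
 * handlers elsewhere in this file): a handler that wants its work retried
 * in timer context records the source and kicks np->nic_poll, e.g.
 *
 *     np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
 *     mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 *
 * nv_do_nic_poll() then masks that source, runs the matching handler by
 * hand and re-enables the irq line.
 */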
2918c35d MS |
2910 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2911 | static void nv_poll_controller(struct net_device *dev) | |
2912 | { | |
2913 | nv_do_nic_poll((unsigned long) dev); | |
2914 | } | |
2915 | #endif | |
2916 | ||
52da3578 AA |
2917 | static void nv_do_stats_poll(unsigned long data) |
2918 | { | |
2919 | struct net_device *dev = (struct net_device *) data; | |
2920 | struct fe_priv *np = netdev_priv(dev); | |
2921 | u8 __iomem *base = get_hwbase(dev); | |
2922 | ||
2923 | np->estats.tx_bytes += readl(base + NvRegTxCnt); | |
2924 | np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); | |
2925 | np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); | |
2926 | np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); | |
2927 | np->estats.tx_late_collision += readl(base + NvRegTxLateCol); | |
2928 | np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); | |
2929 | np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); | |
2930 | np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); | |
2931 | np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); | |
2932 | np->estats.tx_deferral += readl(base + NvRegTxDef); | |
2933 | np->estats.tx_packets += readl(base + NvRegTxFrame); | |
2934 | np->estats.tx_pause += readl(base + NvRegTxPause); | |
2935 | np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); | |
2936 | np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); | |
2937 | np->estats.rx_late_collision += readl(base + NvRegRxLateCol); | |
2938 | np->estats.rx_runt += readl(base + NvRegRxRunt); | |
2939 | np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); | |
2940 | np->estats.rx_over_errors += readl(base + NvRegRxOverflow); | |
2941 | np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); | |
2942 | np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); | |
2943 | np->estats.rx_length_error += readl(base + NvRegRxLenErr); | |
2944 | np->estats.rx_unicast += readl(base + NvRegRxUnicast); | |
2945 | np->estats.rx_multicast += readl(base + NvRegRxMulticast); | |
2946 | np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); | |
2947 | np->estats.rx_bytes += readl(base + NvRegRxCnt); | |
2948 | np->estats.rx_pause += readl(base + NvRegRxPause); | |
2949 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); | |
2950 | np->estats.rx_packets = | |
2951 | np->estats.rx_unicast + | |
2952 | np->estats.rx_multicast + | |
2953 | np->estats.rx_broadcast; | |
2954 | np->estats.rx_errors_total = | |
2955 | np->estats.rx_crc_errors + | |
2956 | np->estats.rx_over_errors + | |
2957 | np->estats.rx_frame_error + | |
2958 | (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + | |
2959 | np->estats.rx_late_collision + | |
2960 | np->estats.rx_runt + | |
2961 | np->estats.rx_frame_too_long; | |
2962 | ||
2963 | if (!np->in_shutdown) | |
2964 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); | |
2965 | } | |
2966 | ||
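/*
 * Editor's note: the += pattern above assumes the hardware counters are
 * clear-on-read, so every poll folds the delta since the previous read
 * into the 64-bit software counters before the 32-bit registers can wrap:
 *
 *     np->estats.rx_bytes += readl(base + NvRegRxCnt);    // delta only
 */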
1da177e4 LT |
2967 | static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
2968 | { | |
ac9c1897 | 2969 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2970 | strcpy(info->driver, "forcedeth"); |
2971 | strcpy(info->version, FORCEDETH_VERSION); | |
2972 | strcpy(info->bus_info, pci_name(np->pci_dev)); | |
2973 | } | |
2974 | ||
2975 | static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) | |
2976 | { | |
ac9c1897 | 2977 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
2978 | wolinfo->supported = WAKE_MAGIC; |
2979 | ||
2980 | spin_lock_irq(&np->lock); | |
2981 | if (np->wolenabled) | |
2982 | wolinfo->wolopts = WAKE_MAGIC; | |
2983 | spin_unlock_irq(&np->lock); | |
2984 | } | |
2985 | ||
2986 | static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) | |
2987 | { | |
ac9c1897 | 2988 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 2989 | u8 __iomem *base = get_hwbase(dev); |
c42d9df9 | 2990 | u32 flags = 0; |
1da177e4 | 2991 | |
1da177e4 | 2992 | if (wolinfo->wolopts == 0) { |
1da177e4 | 2993 | np->wolenabled = 0; |
c42d9df9 | 2994 | } else if (wolinfo->wolopts & WAKE_MAGIC) { |
1da177e4 | 2995 | np->wolenabled = 1; |
c42d9df9 AA |
2996 | flags = NVREG_WAKEUPFLAGS_ENABLE; |
2997 | } | |
2998 | if (netif_running(dev)) { | |
2999 | spin_lock_irq(&np->lock); | |
3000 | writel(flags, base + NvRegWakeUpFlags); | |
3001 | spin_unlock_irq(&np->lock); | |
1da177e4 | 3002 | } |
1da177e4 LT |
3003 | return 0; |
3004 | } | |
3005 | ||
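/*
 * Illustrative userspace view (editor's note): the two handlers above sit
 * behind the ethtool ioctl, e.g.
 *
 *     ethtool eth0              # reports "Supports Wake-on: g" (nv_get_wol)
 *     ethtool -s eth0 wol g     # request WAKE_MAGIC (nv_set_wol)
 *     ethtool -s eth0 wol d     # wolopts == 0, disables wake-up
 */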
3006 | static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |
3007 | { | |
3008 | struct fe_priv *np = netdev_priv(dev); | |
3009 | int adv; | |
3010 | ||
3011 | spin_lock_irq(&np->lock); | |
3012 | ecmd->port = PORT_MII; | |
3013 | if (!netif_running(dev)) { | |
3014 | /* We do not track link speed / duplex setting if the | |
3015 | * interface is disabled. Force a link check */ | |
f9430a01 AA |
3016 | if (nv_update_linkspeed(dev)) { |
3017 | if (!netif_carrier_ok(dev)) | |
3018 | netif_carrier_on(dev); | |
3019 | } else { | |
3020 | if (netif_carrier_ok(dev)) | |
3021 | netif_carrier_off(dev); | |
3022 | } | |
1da177e4 | 3023 | } |
f9430a01 AA |
3024 | |
3025 | if (netif_carrier_ok(dev)) { | |
3026 | switch (np->linkspeed & NVREG_LINKSPEED_MASK) { | |
1da177e4 LT |
3027 | case NVREG_LINKSPEED_10: |
3028 | ecmd->speed = SPEED_10; | |
3029 | break; | |
3030 | case NVREG_LINKSPEED_100: | |
3031 | ecmd->speed = SPEED_100; | |
3032 | break; | |
3033 | case NVREG_LINKSPEED_1000: | |
3034 | ecmd->speed = SPEED_1000; | |
3035 | break; | |
f9430a01 AA |
3036 | } |
3037 | ecmd->duplex = DUPLEX_HALF; | |
3038 | if (np->duplex) | |
3039 | ecmd->duplex = DUPLEX_FULL; | |
3040 | } else { | |
3041 | ecmd->speed = -1; | |
3042 | ecmd->duplex = -1; | |
1da177e4 | 3043 | } |
1da177e4 LT |
3044 | |
3045 | ecmd->autoneg = np->autoneg; | |
3046 | ||
3047 | ecmd->advertising = ADVERTISED_MII; | |
3048 | if (np->autoneg) { | |
3049 | ecmd->advertising |= ADVERTISED_Autoneg; | |
3050 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
f9430a01 AA |
3051 | if (adv & ADVERTISE_10HALF) |
3052 | ecmd->advertising |= ADVERTISED_10baseT_Half; | |
3053 | if (adv & ADVERTISE_10FULL) | |
3054 | ecmd->advertising |= ADVERTISED_10baseT_Full; | |
3055 | if (adv & ADVERTISE_100HALF) | |
3056 | ecmd->advertising |= ADVERTISED_100baseT_Half; | |
3057 | if (adv & ADVERTISE_100FULL) | |
3058 | ecmd->advertising |= ADVERTISED_100baseT_Full; | |
3059 | if (np->gigabit == PHY_GIGABIT) { | |
3060 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); | |
3061 | if (adv & ADVERTISE_1000FULL) | |
3062 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | |
3063 | } | |
1da177e4 | 3064 | } |
1da177e4 LT |
3065 | ecmd->supported = (SUPPORTED_Autoneg | |
3066 | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | |
3067 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | |
3068 | SUPPORTED_MII); | |
3069 | if (np->gigabit == PHY_GIGABIT) | |
3070 | ecmd->supported |= SUPPORTED_1000baseT_Full; | |
3071 | ||
3072 | ecmd->phy_address = np->phyaddr; | |
3073 | ecmd->transceiver = XCVR_EXTERNAL; | |
3074 | ||
3075 | /* ignore maxtxpkt, maxrxpkt for now */ | |
3076 | spin_unlock_irq(&np->lock); | |
3077 | return 0; | |
3078 | } | |
3079 | ||
3080 | static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | |
3081 | { | |
3082 | struct fe_priv *np = netdev_priv(dev); | |
3083 | ||
3084 | if (ecmd->port != PORT_MII) | |
3085 | return -EINVAL; | |
3086 | if (ecmd->transceiver != XCVR_EXTERNAL) | |
3087 | return -EINVAL; | |
3088 | if (ecmd->phy_address != np->phyaddr) { | |
3089 | /* TODO: support switching between multiple phys. Should be | |
3090 | * trivial, but not enabled due to lack of test hardware. */ | |
3091 | return -EINVAL; | |
3092 | } | |
3093 | if (ecmd->autoneg == AUTONEG_ENABLE) { | |
3094 | u32 mask; | |
3095 | ||
3096 | mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | |
3097 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; | |
3098 | if (np->gigabit == PHY_GIGABIT) | |
3099 | mask |= ADVERTISED_1000baseT_Full; | |
3100 | ||
3101 | if ((ecmd->advertising & mask) == 0) | |
3102 | return -EINVAL; | |
3103 | ||
3104 | } else if (ecmd->autoneg == AUTONEG_DISABLE) { | |
3105 | /* Note: with autonegotiation disabled, speed 1000 is intentionally | |
3106 | * forbidden - no one should need that. */ | |
3107 | ||
3108 | if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) | |
3109 | return -EINVAL; | |
3110 | if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) | |
3111 | return -EINVAL; | |
3112 | } else { | |
3113 | return -EINVAL; | |
3114 | } | |
3115 | ||
f9430a01 AA |
3116 | netif_carrier_off(dev); |
3117 | if (netif_running(dev)) { | |
3118 | nv_disable_irq(dev); | |
58dfd9c1 | 3119 | netif_tx_lock_bh(dev); |
f9430a01 AA |
3120 | spin_lock(&np->lock); |
3121 | /* stop engines */ | |
3122 | nv_stop_rx(dev); | |
3123 | nv_stop_tx(dev); | |
3124 | spin_unlock(&np->lock); | |
58dfd9c1 | 3125 | netif_tx_unlock_bh(dev); |
f9430a01 AA |
3126 | } |
3127 | ||
1da177e4 LT |
3128 | if (ecmd->autoneg == AUTONEG_ENABLE) { |
3129 | int adv, bmcr; | |
3130 | ||
3131 | np->autoneg = 1; | |
3132 | ||
3133 | /* advertise only what has been requested */ | |
3134 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
eb91f61b | 3135 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
1da177e4 LT |
3136 | if (ecmd->advertising & ADVERTISED_10baseT_Half) |
3137 | adv |= ADVERTISE_10HALF; | |
3138 | if (ecmd->advertising & ADVERTISED_10baseT_Full) | |
b6d0773f | 3139 | adv |= ADVERTISE_10FULL; |
1da177e4 LT |
3140 | if (ecmd->advertising & ADVERTISED_100baseT_Half) |
3141 | adv |= ADVERTISE_100HALF; | |
3142 | if (ecmd->advertising & ADVERTISED_100baseT_Full) | |
b6d0773f AA |
3143 | adv |= ADVERTISE_100FULL; |
3144 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ | |
3145 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
3146 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
3147 | adv |= ADVERTISE_PAUSE_ASYM; | |
1da177e4 LT |
3148 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
3149 | ||
3150 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b | 3151 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 LT |
3152 | adv &= ~ADVERTISE_1000FULL; |
3153 | if (ecmd->advertising & ADVERTISED_1000baseT_Full) | |
3154 | adv |= ADVERTISE_1000FULL; | |
eb91f61b | 3155 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
1da177e4 LT |
3156 | } |
3157 | ||
f9430a01 AA |
3158 | if (netif_running(dev)) |
3159 | printk(KERN_INFO "%s: link down.\n", dev->name); | |
1da177e4 LT |
3160 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
3161 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
3162 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
3163 | ||
3164 | } else { | |
3165 | int adv, bmcr; | |
3166 | ||
3167 | np->autoneg = 0; | |
3168 | ||
3169 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
eb91f61b | 3170 | adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); |
1da177e4 LT |
3171 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) |
3172 | adv |= ADVERTISE_10HALF; | |
3173 | if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) | |
b6d0773f | 3174 | adv |= ADVERTISE_10FULL; |
1da177e4 LT |
3175 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) |
3176 | adv |= ADVERTISE_100HALF; | |
3177 | if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) | |
b6d0773f AA |
3178 | adv |= ADVERTISE_100FULL; |
3179 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); | |
3180 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */ | |
3181 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
3182 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
3183 | } | |
3184 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { | |
3185 | adv |= ADVERTISE_PAUSE_ASYM; | |
3186 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
3187 | } | |
1da177e4 LT |
3188 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); |
3189 | np->fixed_mode = adv; | |
3190 | ||
3191 | if (np->gigabit == PHY_GIGABIT) { | |
eb91f61b | 3192 | adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); |
1da177e4 | 3193 | adv &= ~ADVERTISE_1000FULL; |
eb91f61b | 3194 | mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); |
1da177e4 LT |
3195 | } |
3196 | ||
3197 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
f9430a01 AA |
3198 | bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX); |
3199 | if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) | |
1da177e4 | 3200 | bmcr |= BMCR_FULLDPLX; |
f9430a01 | 3201 | if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) |
1da177e4 LT |
3202 | bmcr |= BMCR_SPEED100; |
3203 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
f9430a01 AA |
3204 | if (np->phy_oui == PHY_OUI_MARVELL) { |
3205 | /* reset the phy */ | |
3206 | if (phy_reset(dev)) { | |
3207 | printk(KERN_INFO "%s: phy reset failed\n", dev->name); | |
3208 | return -EINVAL; | |
3209 | } | |
3210 | } else if (netif_running(dev)) { | |
1da177e4 LT |
3211 | /* Wait a bit and then reconfigure the nic. */ |
3212 | udelay(10); | |
3213 | nv_linkchange(dev); | |
3214 | } | |
3215 | } | |
f9430a01 AA |
3216 | |
3217 | if (netif_running(dev)) { | |
3218 | nv_start_rx(dev); | |
3219 | nv_start_tx(dev); | |
3220 | nv_enable_irq(dev); | |
3221 | } | |
1da177e4 LT |
3222 | |
3223 | return 0; | |
3224 | } | |
3225 | ||
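/*
 * Worked example (editor's sketch): forcing 100 Mbit full duplex with
 * autoneg off travels through the else-branch above roughly as
 *
 *     adv = ADVERTISE_100FULL;    // derived from ecmd->speed/duplex
 *     mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
 *     bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
 *     bmcr |= BMCR_FULLDPLX | BMCR_SPEED100;    // matches np->fixed_mode
 *     mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
 */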
dc8216c1 | 3226 | #define FORCEDETH_REGS_VER 1 |
dc8216c1 MS |
3227 | |
3228 | static int nv_get_regs_len(struct net_device *dev) | |
3229 | { | |
86a0f043 AA |
3230 | struct fe_priv *np = netdev_priv(dev); |
3231 | return np->register_size; | |
dc8216c1 MS |
3232 | } |
3233 | ||
3234 | static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) | |
3235 | { | |
ac9c1897 | 3236 | struct fe_priv *np = netdev_priv(dev); |
dc8216c1 MS |
3237 | u8 __iomem *base = get_hwbase(dev); |
3238 | u32 *rbuf = buf; | |
3239 | int i; | |
3240 | ||
3241 | regs->version = FORCEDETH_REGS_VER; | |
3242 | spin_lock_irq(&np->lock); | |
86a0f043 | 3243 | for (i = 0; i < np->register_size/sizeof(u32); i++) /* '<', not '<=': stay inside the regs buffer */ |
dc8216c1 MS |
3244 | rbuf[i] = readl(base + i*sizeof(u32)); |
3245 | spin_unlock_irq(&np->lock); | |
3246 | } | |
3247 | ||
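/*
 * Editor's note: userspace fetches this dump through the standard ethtool
 * register interface, e.g.
 *
 *     ethtool -d eth0    # raw image of nv_get_regs_len() bytes
 */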
3248 | static int nv_nway_reset(struct net_device *dev) | |
3249 | { | |
ac9c1897 | 3250 | struct fe_priv *np = netdev_priv(dev); |
dc8216c1 MS |
3251 | int ret; |
3252 | ||
dc8216c1 MS |
3253 | if (np->autoneg) { |
3254 | int bmcr; | |
3255 | ||
f9430a01 AA |
3256 | netif_carrier_off(dev); |
3257 | if (netif_running(dev)) { | |
3258 | nv_disable_irq(dev); | |
58dfd9c1 | 3259 | netif_tx_lock_bh(dev); |
f9430a01 AA |
3260 | spin_lock(&np->lock); |
3261 | /* stop engines */ | |
3262 | nv_stop_rx(dev); | |
3263 | nv_stop_tx(dev); | |
3264 | spin_unlock(&np->lock); | |
58dfd9c1 | 3265 | netif_tx_unlock_bh(dev); |
f9430a01 AA |
3266 | printk(KERN_INFO "%s: link down.\n", dev->name); |
3267 | } | |
3268 | ||
dc8216c1 MS |
3269 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); |
3270 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
3271 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
3272 | ||
f9430a01 AA |
3273 | if (netif_running(dev)) { |
3274 | nv_start_rx(dev); | |
3275 | nv_start_tx(dev); | |
3276 | nv_enable_irq(dev); | |
3277 | } | |
dc8216c1 MS |
3278 | ret = 0; |
3279 | } else { | |
3280 | ret = -EINVAL; | |
3281 | } | |
dc8216c1 MS |
3282 | |
3283 | return ret; | |
3284 | } | |
3285 | ||
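/*
 * Editor's note: this is the backend of "ethtool -r eth0". Restarting
 * negotiation only makes sense with autoneg on, hence the -EINVAL branch:
 *
 *     ethtool -r eth0    # sets BMCR_ANENABLE | BMCR_ANRESTART on the phy
 */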
0674d594 ZA |
3286 | static int nv_set_tso(struct net_device *dev, u32 value) |
3287 | { | |
3288 | struct fe_priv *np = netdev_priv(dev); | |
3289 | ||
3290 | if ((np->driver_data & DEV_HAS_CHECKSUM)) | |
3291 | return ethtool_op_set_tso(dev, value); | |
3292 | else | |
6a78814f | 3293 | return -EOPNOTSUPP; |
0674d594 | 3294 | } |
0674d594 | 3295 | |
eafa59f6 AA |
3296 | static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) |
3297 | { | |
3298 | struct fe_priv *np = netdev_priv(dev); | |
3299 | ||
3300 | ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; | |
3301 | ring->rx_mini_max_pending = 0; | |
3302 | ring->rx_jumbo_max_pending = 0; | |
3303 | ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; | |
3304 | ||
3305 | ring->rx_pending = np->rx_ring_size; | |
3306 | ring->rx_mini_pending = 0; | |
3307 | ring->rx_jumbo_pending = 0; | |
3308 | ring->tx_pending = np->tx_ring_size; | |
3309 | } | |
3310 | ||
3311 | static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) | |
3312 | { | |
3313 | struct fe_priv *np = netdev_priv(dev); | |
3314 | u8 __iomem *base = get_hwbase(dev); | |
3315 | u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len; | |
3316 | dma_addr_t ring_addr; | |
3317 | ||
3318 | if (ring->rx_pending < RX_RING_MIN || | |
3319 | ring->tx_pending < TX_RING_MIN || | |
3320 | ring->rx_mini_pending != 0 || | |
3321 | ring->rx_jumbo_pending != 0 || | |
3322 | (np->desc_ver == DESC_VER_1 && | |
3323 | (ring->rx_pending > RING_MAX_DESC_VER_1 || | |
3324 | ring->tx_pending > RING_MAX_DESC_VER_1)) || | |
3325 | (np->desc_ver != DESC_VER_1 && | |
3326 | (ring->rx_pending > RING_MAX_DESC_VER_2_3 || | |
3327 | ring->tx_pending > RING_MAX_DESC_VER_2_3))) { | |
3328 | return -EINVAL; | |
3329 | } | |
3330 | ||
3331 | /* allocate new rings */ | |
3332 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
3333 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | |
3334 | sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), | |
3335 | &ring_addr); | |
3336 | } else { | |
3337 | rxtx_ring = pci_alloc_consistent(np->pci_dev, | |
3338 | sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | |
3339 | &ring_addr); | |
3340 | } | |
3341 | rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL); | |
3342 | rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL); | |
3343 | tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL); | |
3344 | tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL); | |
3345 | tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL); | |
3346 | if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { | |
3347 | /* fall back to old rings */ | |
3348 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 | 3349 | if (rxtx_ring) |
eafa59f6 AA |
3350 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), |
3351 | rxtx_ring, ring_addr); | |
3352 | } else { | |
3353 | if (rxtx_ring) | |
3354 | pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), | |
3355 | rxtx_ring, ring_addr); | |
3356 | } | |
3357 | if (rx_skbuff) | |
3358 | kfree(rx_skbuff); | |
3359 | if (rx_dma) | |
3360 | kfree(rx_dma); | |
3361 | if (tx_skbuff) | |
3362 | kfree(tx_skbuff); | |
3363 | if (tx_dma) | |
3364 | kfree(tx_dma); | |
3365 | if (tx_dma_len) | |
3366 | kfree(tx_dma_len); | |
3367 | goto exit; | |
3368 | } | |
3369 | ||
3370 | if (netif_running(dev)) { | |
3371 | nv_disable_irq(dev); | |
58dfd9c1 | 3372 | netif_tx_lock_bh(dev); |
eafa59f6 AA |
3373 | spin_lock(&np->lock); |
3374 | /* stop engines */ | |
3375 | nv_stop_rx(dev); | |
3376 | nv_stop_tx(dev); | |
3377 | nv_txrx_reset(dev); | |
3378 | /* drain queues */ | |
3379 | nv_drain_rx(dev); | |
3380 | nv_drain_tx(dev); | |
3381 | /* delete queues */ | |
3382 | free_rings(dev); | |
3383 | } | |
3384 | ||
3385 | /* set new values */ | |
3386 | np->rx_ring_size = ring->rx_pending; | |
3387 | np->tx_ring_size = ring->tx_pending; | |
3388 | np->tx_limit_stop = ring->tx_pending - TX_LIMIT_DIFFERENCE; | |
3389 | np->tx_limit_start = ring->tx_pending - TX_LIMIT_DIFFERENCE - 1; | |
3390 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
3391 | np->rx_ring.orig = (struct ring_desc*)rxtx_ring; | |
3392 | np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; | |
3393 | } else { | |
3394 | np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; | |
3395 | np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; | |
3396 | } | |
3397 | np->rx_skbuff = (struct sk_buff**)rx_skbuff; | |
3398 | np->rx_dma = (dma_addr_t*)rx_dma; | |
3399 | np->tx_skbuff = (struct sk_buff**)tx_skbuff; | |
3400 | np->tx_dma = (dma_addr_t*)tx_dma; | |
3401 | np->tx_dma_len = (unsigned int*)tx_dma_len; | |
3402 | np->ring_addr = ring_addr; | |
3403 | ||
3404 | memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size); | |
3405 | memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size); | |
3406 | memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size); | |
3407 | memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size); | |
3408 | memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size); | |
3409 | ||
3410 | if (netif_running(dev)) { | |
3411 | /* reinit driver view of the queues */ | |
3412 | set_bufsize(dev); | |
3413 | if (nv_init_ring(dev)) { | |
3414 | if (!np->in_shutdown) | |
3415 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
3416 | } | |
3417 | ||
3418 | /* reinit nic view of the queues */ | |
3419 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
3420 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | |
3421 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | |
3422 | base + NvRegRingSizes); | |
3423 | pci_push(base); | |
3424 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
3425 | pci_push(base); | |
3426 | ||
3427 | /* restart engines */ | |
3428 | nv_start_rx(dev); | |
3429 | nv_start_tx(dev); | |
3430 | spin_unlock(&np->lock); | |
58dfd9c1 | 3431 | netif_tx_unlock_bh(dev); |
eafa59f6 AA |
3432 | nv_enable_irq(dev); |
3433 | } | |
3434 | return 0; | |
3435 | exit: | |
3436 | return -ENOMEM; | |
3437 | } | |
3438 | ||
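/*
 * Editor's usage sketch: ring sizes are changed at runtime with
 *
 *     ethtool -G eth0 rx 512 tx 256
 *
 * subject to the bounds checked above (RX_RING_MIN/TX_RING_MIN up to
 * RING_MAX_DESC_VER_1 or RING_MAX_DESC_VER_2_3, per np->desc_ver).
 */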
b6d0773f AA |
3439 | static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) |
3440 | { | |
3441 | struct fe_priv *np = netdev_priv(dev); | |
3442 | ||
3443 | pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; | |
3444 | pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; | |
3445 | pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; | |
3446 | } | |
3447 | ||
3448 | static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause) | |
3449 | { | |
3450 | struct fe_priv *np = netdev_priv(dev); | |
3451 | int adv, bmcr; | |
3452 | ||
3453 | if ((!np->autoneg && np->duplex == 0) || | |
3454 | (np->autoneg && !pause->autoneg && np->duplex == 0)) { | |
3455 | printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n", | |
3456 | dev->name); | |
3457 | return -EINVAL; | |
3458 | } | |
3459 | if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { | |
3460 | printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); | |
3461 | return -EINVAL; | |
3462 | } | |
3463 | ||
3464 | netif_carrier_off(dev); | |
3465 | if (netif_running(dev)) { | |
3466 | nv_disable_irq(dev); | |
58dfd9c1 | 3467 | netif_tx_lock_bh(dev); |
b6d0773f AA |
3468 | spin_lock(&np->lock); |
3469 | /* stop engines */ | |
3470 | nv_stop_rx(dev); | |
3471 | nv_stop_tx(dev); | |
3472 | spin_unlock(&np->lock); | |
58dfd9c1 | 3473 | netif_tx_unlock_bh(dev); |
b6d0773f AA |
3474 | } |
3475 | ||
3476 | np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); | |
3477 | if (pause->rx_pause) | |
3478 | np->pause_flags |= NV_PAUSEFRAME_RX_REQ; | |
3479 | if (pause->tx_pause) | |
3480 | np->pause_flags |= NV_PAUSEFRAME_TX_REQ; | |
3481 | ||
3482 | if (np->autoneg && pause->autoneg) { | |
3483 | np->pause_flags |= NV_PAUSEFRAME_AUTONEG; | |
3484 | ||
3485 | adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); | |
3486 | adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); | |
3487 | if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */ | |
3488 | adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; | |
3489 | if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) | |
3490 | adv |= ADVERTISE_PAUSE_ASYM; | |
3491 | mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); | |
3492 | ||
3493 | if (netif_running(dev)) | |
3494 | printk(KERN_INFO "%s: link down.\n", dev->name); | |
3495 | bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); | |
3496 | bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); | |
3497 | mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); | |
3498 | } else { | |
3499 | np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); | |
3500 | if (pause->rx_pause) | |
3501 | np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; | |
3502 | if (pause->tx_pause) | |
3503 | np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; | |
3504 | ||
3505 | if (!netif_running(dev)) | |
3506 | nv_update_linkspeed(dev); | |
3507 | else | |
3508 | nv_update_pause(dev, np->pause_flags); | |
3509 | } | |
3510 | ||
3511 | if (netif_running(dev)) { | |
3512 | nv_start_rx(dev); | |
3513 | nv_start_tx(dev); | |
3514 | nv_enable_irq(dev); | |
3515 | } | |
3516 | return 0; | |
3517 | } | |
3518 | ||
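/*
 * Editor's usage sketch for the pause handlers above:
 *
 *     ethtool -a eth0                            # nv_get_pauseparam
 *     ethtool -A eth0 autoneg on rx on tx off    # nv_set_pauseparam
 *
 * With autoneg the request only updates the advertisement and restarts
 * negotiation; the final *_ENABLE flags are settled by the link code.
 */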
5ed2616f AA |
3519 | static u32 nv_get_rx_csum(struct net_device *dev) |
3520 | { | |
3521 | struct fe_priv *np = netdev_priv(dev); | |
3522 | return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0; | |
3523 | } | |
3524 | ||
3525 | static int nv_set_rx_csum(struct net_device *dev, u32 data) | |
3526 | { | |
3527 | struct fe_priv *np = netdev_priv(dev); | |
3528 | u8 __iomem *base = get_hwbase(dev); | |
3529 | int retcode = 0; | |
3530 | ||
3531 | if (np->driver_data & DEV_HAS_CHECKSUM) { | |
3532 | ||
3533 | if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) || | |
3534 | (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) { | |
3535 | /* already set or unset */ | |
3536 | return 0; | |
3537 | } | |
3538 | ||
3539 | if (data) { | |
3540 | np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; | |
3541 | } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) { | |
3542 | np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; | |
3543 | } else { | |
3544 | printk(KERN_INFO "Cannot disable rx checksum if vlan is enabled\n"); | |
3545 | return -EINVAL; | |
3546 | } | |
3547 | ||
3548 | if (netif_running(dev)) { | |
3549 | spin_lock_irq(&np->lock); | |
3550 | writel(np->txrxctl_bits, base + NvRegTxRxControl); | |
3551 | spin_unlock_irq(&np->lock); | |
3552 | } | |
3553 | } else { | |
3554 | return -EINVAL; | |
3555 | } | |
3556 | ||
3557 | return retcode; | |
3558 | } | |
3559 | ||
3560 | static int nv_set_tx_csum(struct net_device *dev, u32 data) | |
3561 | { | |
3562 | struct fe_priv *np = netdev_priv(dev); | |
3563 | ||
3564 | if (np->driver_data & DEV_HAS_CHECKSUM) | |
3565 | return ethtool_op_set_tx_hw_csum(dev, data); | |
3566 | else | |
3567 | return -EOPNOTSUPP; | |
3568 | } | |
3569 | ||
3570 | static int nv_set_sg(struct net_device *dev, u32 data) | |
3571 | { | |
3572 | struct fe_priv *np = netdev_priv(dev); | |
3573 | ||
3574 | if (np->driver_data & DEV_HAS_CHECKSUM) | |
3575 | return ethtool_op_set_sg(dev, data); | |
3576 | else | |
3577 | return -EOPNOTSUPP; | |
3578 | } | |
3579 | ||
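/*
 * Editor's usage sketch: the offload toggles above map onto
 *
 *     ethtool -K eth0 rx on tx on sg on tso on
 *
 * and each of them refuses (-EOPNOTSUPP, or -EINVAL for rx csum) unless
 * the chip advertises DEV_HAS_CHECKSUM.
 */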
52da3578 AA |
3580 | static int nv_get_stats_count(struct net_device *dev) |
3581 | { | |
3582 | struct fe_priv *np = netdev_priv(dev); | |
3583 | ||
3584 | if (np->driver_data & DEV_HAS_STATISTICS) | |
f82a9352 | 3585 | return sizeof(struct nv_ethtool_stats)/sizeof(u64); |
52da3578 AA |
3586 | else |
3587 | return 0; | |
3588 | } | |
3589 | ||
3590 | static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer) | |
3591 | { | |
3592 | struct fe_priv *np = netdev_priv(dev); | |
3593 | ||
3594 | /* update stats */ | |
3595 | nv_do_stats_poll((unsigned long)dev); | |
3596 | ||
3597 | memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64)); | |
3598 | } | |
3599 | ||
9589c77a AA |
3600 | static int nv_self_test_count(struct net_device *dev) |
3601 | { | |
3602 | struct fe_priv *np = netdev_priv(dev); | |
3603 | ||
3604 | if (np->driver_data & DEV_HAS_TEST_EXTENDED) | |
3605 | return NV_TEST_COUNT_EXTENDED; | |
3606 | else | |
3607 | return NV_TEST_COUNT_BASE; | |
3608 | } | |
3609 | ||
3610 | static int nv_link_test(struct net_device *dev) | |
3611 | { | |
3612 | struct fe_priv *np = netdev_priv(dev); | |
3613 | int mii_status; | |
3614 | ||
3615 | mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
3616 | mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); | |
3617 | ||
3618 | /* check phy link status */ | |
3619 | if (!(mii_status & BMSR_LSTATUS)) | |
3620 | return 0; | |
3621 | else | |
3622 | return 1; | |
3623 | } | |
3624 | ||
3625 | static int nv_register_test(struct net_device *dev) | |
3626 | { | |
3627 | u8 __iomem *base = get_hwbase(dev); | |
3628 | int i = 0; | |
3629 | u32 orig_read, new_read; | |
3630 | ||
3631 | do { | |
3632 | orig_read = readl(base + nv_registers_test[i].reg); | |
3633 | ||
3634 | /* xor with mask to toggle bits */ | |
3635 | orig_read ^= nv_registers_test[i].mask; | |
3636 | ||
3637 | writel(orig_read, base + nv_registers_test[i].reg); | |
3638 | ||
3639 | new_read = readl(base + nv_registers_test[i].reg); | |
3640 | ||
3641 | if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask)) | |
3642 | return 0; | |
3643 | ||
3644 | /* restore original value */ | |
3645 | orig_read ^= nv_registers_test[i].mask; | |
3646 | writel(orig_read, base + nv_registers_test[i].reg); | |
3647 | ||
3648 | } while (nv_registers_test[++i].reg != 0); | |
3649 | ||
3650 | return 1; | |
3651 | } | |
3652 | ||
3653 | static int nv_interrupt_test(struct net_device *dev) | |
3654 | { | |
3655 | struct fe_priv *np = netdev_priv(dev); | |
3656 | u8 __iomem *base = get_hwbase(dev); | |
3657 | int ret = 1; | |
3658 | int testcnt; | |
3659 | u32 save_msi_flags, save_poll_interval = 0; | |
3660 | ||
3661 | if (netif_running(dev)) { | |
3662 | /* free current irq */ | |
3663 | nv_free_irq(dev); | |
3664 | save_poll_interval = readl(base+NvRegPollingInterval); | |
3665 | } | |
3666 | ||
3667 | /* flag to test interrupt handler */ | |
3668 | np->intr_test = 0; | |
3669 | ||
3670 | /* setup test irq */ | |
3671 | save_msi_flags = np->msi_flags; | |
3672 | np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; | |
3673 | np->msi_flags |= 0x001; /* setup 1 vector */ | |
3674 | if (nv_request_irq(dev, 1)) | |
3675 | return 0; | |
3676 | ||
3677 | /* setup timer interrupt */ | |
3678 | writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); | |
3679 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); | |
3680 | ||
3681 | nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER); | |
3682 | ||
3683 | /* wait for at least one interrupt */ | |
3684 | msleep(100); | |
3685 | ||
3686 | spin_lock_irq(&np->lock); | |
3687 | ||
3688 | /* flag should be set within ISR */ | |
3689 | testcnt = np->intr_test; | |
3690 | if (!testcnt) | |
3691 | ret = 2; | |
3692 | ||
3693 | nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER); | |
3694 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) | |
3695 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
3696 | else | |
3697 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | |
3698 | ||
3699 | spin_unlock_irq(&np->lock); | |
3700 | ||
3701 | nv_free_irq(dev); | |
3702 | ||
3703 | np->msi_flags = save_msi_flags; | |
3704 | ||
3705 | if (netif_running(dev)) { | |
3706 | writel(save_poll_interval, base + NvRegPollingInterval); | |
3707 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); | |
3708 | /* restore original irq */ | |
3709 | if (nv_request_irq(dev, 0)) | |
3710 | return 0; | |
3711 | } | |
3712 | ||
3713 | return ret; | |
3714 | } | |
3715 | ||
3716 | static int nv_loopback_test(struct net_device *dev) | |
3717 | { | |
3718 | struct fe_priv *np = netdev_priv(dev); | |
3719 | u8 __iomem *base = get_hwbase(dev); | |
3720 | struct sk_buff *tx_skb, *rx_skb; | |
3721 | dma_addr_t test_dma_addr; | |
3722 | u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); | |
f82a9352 | 3723 | u32 flags; |
9589c77a AA |
3724 | int len, i, pkt_len; |
3725 | u8 *pkt_data; | |
3726 | u32 filter_flags = 0; | |
3727 | u32 misc1_flags = 0; | |
3728 | int ret = 1; | |
3729 | ||
3730 | if (netif_running(dev)) { | |
3731 | nv_disable_irq(dev); | |
3732 | filter_flags = readl(base + NvRegPacketFilterFlags); | |
3733 | misc1_flags = readl(base + NvRegMisc1); | |
3734 | } else { | |
3735 | nv_txrx_reset(dev); | |
3736 | } | |
3737 | ||
3738 | /* reinit driver view of the rx queue */ | |
3739 | set_bufsize(dev); | |
3740 | nv_init_ring(dev); | |
3741 | ||
3742 | /* setup hardware for loopback */ | |
3743 | writel(NVREG_MISC1_FORCE, base + NvRegMisc1); | |
3744 | writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags); | |
3745 | ||
3746 | /* reinit nic view of the rx queue */ | |
3747 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
3748 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | |
3749 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | |
3750 | base + NvRegRingSizes); | |
3751 | pci_push(base); | |
3752 | ||
3753 | /* restart rx engine */ | |
3754 | nv_start_rx(dev); | |
3755 | nv_start_tx(dev); | |
3756 | ||
3757 | /* setup packet for tx */ | |
3758 | pkt_len = ETH_DATA_LEN; | |
3759 | tx_skb = dev_alloc_skb(pkt_len); | |
3760 | pkt_data = skb_put(tx_skb, pkt_len); | |
3761 | for (i = 0; i < pkt_len; i++) | |
3762 | pkt_data[i] = (u8)(i & 0xff); | |
3763 | test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, | |
3764 | tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE); | |
3765 | ||
3766 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 SH |
3767 | np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); |
3768 | np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | |
9589c77a | 3769 | } else { |
f82a9352 SH |
3770 | np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32; |
3771 | np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; | |
3772 | np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); | |
9589c77a AA |
3773 | } |
3774 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
3775 | pci_push(get_hwbase(dev)); | |
3776 | ||
3777 | msleep(500); | |
3778 | ||
3779 | /* check for rx of the packet */ | |
3780 | if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { | |
f82a9352 | 3781 | flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); |
9589c77a AA |
3782 | len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); |
3783 | ||
3784 | } else { | |
f82a9352 | 3785 | flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); |
9589c77a AA |
3786 | len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); |
3787 | } | |
3788 | ||
f82a9352 | 3789 | if (flags & NV_RX_AVAIL) { |
9589c77a AA |
3790 | ret = 0; |
3791 | } else if (np->desc_ver == DESC_VER_1) { | |
f82a9352 | 3792 | if (flags & NV_RX_ERROR) |
9589c77a AA |
3793 | ret = 0; |
3794 | } else { | |
f82a9352 | 3795 | if (flags & NV_RX2_ERROR) { |
9589c77a AA |
3796 | ret = 0; |
3797 | } | |
3798 | } | |
3799 | ||
3800 | if (ret) { | |
3801 | if (len != pkt_len) { | |
3802 | ret = 0; | |
3803 | dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n", | |
3804 | dev->name, len, pkt_len); | |
3805 | } else { | |
3806 | rx_skb = np->rx_skbuff[0]; | |
3807 | for (i = 0; i < pkt_len; i++) { | |
3808 | if (rx_skb->data[i] != (u8)(i & 0xff)) { | |
3809 | ret = 0; | |
3810 | dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n", | |
3811 | dev->name, i); | |
3812 | break; | |
3813 | } | |
3814 | } | |
3815 | } | |
3816 | } else { | |
3817 | dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name); | |
3818 | } | |
3819 | ||
3820 | pci_unmap_single(np->pci_dev, test_dma_addr, | |
3821 | tx_skb->end-tx_skb->data, | |
3822 | PCI_DMA_TODEVICE); | |
3823 | dev_kfree_skb_any(tx_skb); | |
3824 | ||
3825 | /* stop engines */ | |
3826 | nv_stop_rx(dev); | |
3827 | nv_stop_tx(dev); | |
3828 | nv_txrx_reset(dev); | |
3829 | /* drain rx queue */ | |
3830 | nv_drain_rx(dev); | |
3831 | nv_drain_tx(dev); | |
3832 | ||
3833 | if (netif_running(dev)) { | |
3834 | writel(misc1_flags, base + NvRegMisc1); | |
3835 | writel(filter_flags, base + NvRegPacketFilterFlags); | |
3836 | nv_enable_irq(dev); | |
3837 | } | |
3838 | ||
3839 | return ret; | |
3840 | } | |
3841 | ||
3842 | static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer) | |
3843 | { | |
3844 | struct fe_priv *np = netdev_priv(dev); | |
3845 | u8 __iomem *base = get_hwbase(dev); | |
3846 | int result; | |
3847 | memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64)); | |
3848 | ||
3849 | if (!nv_link_test(dev)) { | |
3850 | test->flags |= ETH_TEST_FL_FAILED; | |
3851 | buffer[0] = 1; | |
3852 | } | |
3853 | ||
3854 | if (test->flags & ETH_TEST_FL_OFFLINE) { | |
3855 | if (netif_running(dev)) { | |
3856 | netif_stop_queue(dev); | |
e27cdba5 | 3857 | netif_poll_disable(dev); |
58dfd9c1 | 3858 | netif_tx_lock_bh(dev); |
9589c77a AA |
3859 | spin_lock_irq(&np->lock); |
3860 | nv_disable_hw_interrupts(dev, np->irqmask); | |
3861 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | |
3862 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
3863 | } else { | |
3864 | writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); | |
3865 | } | |
3866 | /* stop engines */ | |
3867 | nv_stop_rx(dev); | |
3868 | nv_stop_tx(dev); | |
3869 | nv_txrx_reset(dev); | |
3870 | /* drain rx queue */ | |
3871 | nv_drain_rx(dev); | |
3872 | nv_drain_tx(dev); | |
3873 | spin_unlock_irq(&np->lock); | |
58dfd9c1 | 3874 | netif_tx_unlock_bh(dev); |
9589c77a AA |
3875 | } |
3876 | ||
3877 | if (!nv_register_test(dev)) { | |
3878 | test->flags |= ETH_TEST_FL_FAILED; | |
3879 | buffer[1] = 1; | |
3880 | } | |
3881 | ||
3882 | result = nv_interrupt_test(dev); | |
3883 | if (result != 1) { | |
3884 | test->flags |= ETH_TEST_FL_FAILED; | |
3885 | buffer[2] = 1; | |
3886 | } | |
3887 | if (result == 0) { | |
3888 | /* bail out */ | |
3889 | return; | |
3890 | } | |
3891 | ||
3892 | if (!nv_loopback_test(dev)) { | |
3893 | test->flags |= ETH_TEST_FL_FAILED; | |
3894 | buffer[3] = 1; | |
3895 | } | |
3896 | ||
3897 | if (netif_running(dev)) { | |
3898 | /* reinit driver view of the rx queue */ | |
3899 | set_bufsize(dev); | |
3900 | if (nv_init_ring(dev)) { | |
3901 | if (!np->in_shutdown) | |
3902 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
3903 | } | |
3904 | /* reinit nic view of the rx queue */ | |
3905 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); | |
3906 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); | |
3907 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), | |
3908 | base + NvRegRingSizes); | |
3909 | pci_push(base); | |
3910 | writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
3911 | pci_push(base); | |
3912 | /* restart rx engine */ | |
3913 | nv_start_rx(dev); | |
3914 | nv_start_tx(dev); | |
3915 | netif_start_queue(dev); | |
e27cdba5 | 3916 | netif_poll_enable(dev); |
9589c77a AA |
3917 | nv_enable_hw_interrupts(dev, np->irqmask); |
3918 | } | |
3919 | } | |
3920 | } | |
3921 | ||
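/*
 * Editor's usage sketch: "ethtool -t eth0 offline" runs all four tests;
 * without "offline" only the link test executes. The result buffer is
 * positional -- a 1 marks a failed test:
 *
 *     buffer[0] link, buffer[1] registers,
 *     buffer[2] interrupt, buffer[3] loopback
 */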
52da3578 AA |
3922 | static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer) |
3923 | { | |
3924 | switch (stringset) { | |
3925 | case ETH_SS_STATS: | |
3926 | memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str)); | |
3927 | break; | |
9589c77a AA |
3928 | case ETH_SS_TEST: |
3929 | memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str)); | |
3930 | break; | |
52da3578 AA |
3931 | } |
3932 | } | |
3933 | ||
1da177e4 LT |
3934 | static struct ethtool_ops ops = { |
3935 | .get_drvinfo = nv_get_drvinfo, | |
3936 | .get_link = ethtool_op_get_link, | |
3937 | .get_wol = nv_get_wol, | |
3938 | .set_wol = nv_set_wol, | |
3939 | .get_settings = nv_get_settings, | |
3940 | .set_settings = nv_set_settings, | |
dc8216c1 MS |
3941 | .get_regs_len = nv_get_regs_len, |
3942 | .get_regs = nv_get_regs, | |
3943 | .nway_reset = nv_nway_reset, | |
c704b856 | 3944 | .get_perm_addr = ethtool_op_get_perm_addr, |
0674d594 | 3945 | .get_tso = ethtool_op_get_tso, |
6a78814f | 3946 | .set_tso = nv_set_tso, |
eafa59f6 AA |
3947 | .get_ringparam = nv_get_ringparam, |
3948 | .set_ringparam = nv_set_ringparam, | |
b6d0773f AA |
3949 | .get_pauseparam = nv_get_pauseparam, |
3950 | .set_pauseparam = nv_set_pauseparam, | |
5ed2616f AA |
3951 | .get_rx_csum = nv_get_rx_csum, |
3952 | .set_rx_csum = nv_set_rx_csum, | |
3953 | .get_tx_csum = ethtool_op_get_tx_csum, | |
3954 | .set_tx_csum = nv_set_tx_csum, | |
3955 | .get_sg = ethtool_op_get_sg, | |
3956 | .set_sg = nv_set_sg, | |
52da3578 AA |
3957 | .get_strings = nv_get_strings, |
3958 | .get_stats_count = nv_get_stats_count, | |
3959 | .get_ethtool_stats = nv_get_ethtool_stats, | |
9589c77a AA |
3960 | .self_test_count = nv_self_test_count, |
3961 | .self_test = nv_self_test, | |
1da177e4 LT |
3962 | }; |
3963 | ||
ee407b02 AA |
3964 | static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) |
3965 | { | |
3966 | struct fe_priv *np = get_nvpriv(dev); | |
3967 | ||
3968 | spin_lock_irq(&np->lock); | |
3969 | ||
3970 | /* save vlan group */ | |
3971 | np->vlangrp = grp; | |
3972 | ||
3973 | if (grp) { | |
3974 | /* enable vlan on MAC */ | |
3975 | np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS; | |
3976 | } else { | |
3977 | /* disable vlan on MAC */ | |
3978 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; | |
3979 | np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; | |
3980 | } | |
3981 | ||
3982 | writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); | |
3983 | ||
3984 | spin_unlock_irq(&np->lock); | |
3985 | }; | |
3986 | ||
3987 | static void nv_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |
3988 | { | |
3989 | /* nothing to do */ | |
3990 | }; | |
3991 | ||
1da177e4 LT |
3992 | static int nv_open(struct net_device *dev) |
3993 | { | |
ac9c1897 | 3994 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 | 3995 | u8 __iomem *base = get_hwbase(dev); |
d33a73c8 AA |
3996 | int ret = 1; |
3997 | int oom, i; | |
1da177e4 LT |
3998 | |
3999 | dprintk(KERN_DEBUG "nv_open: begin\n"); | |
4000 | ||
f1489653 | 4001 | /* erase previous misconfiguration */ |
86a0f043 AA |
4002 | if (np->driver_data & DEV_HAS_POWER_CNTRL) |
4003 | nv_mac_reset(dev); | |
1da177e4 LT |
4004 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); |
4005 | writel(0, base + NvRegMulticastAddrB); | |
4006 | writel(0, base + NvRegMulticastMaskA); | |
4007 | writel(0, base + NvRegMulticastMaskB); | |
4008 | writel(0, base + NvRegPacketFilterFlags); | |
4009 | ||
4010 | writel(0, base + NvRegTransmitterControl); | |
4011 | writel(0, base + NvRegReceiverControl); | |
4012 | ||
4013 | writel(0, base + NvRegAdapterControl); | |
4014 | ||
eb91f61b AA |
4015 | if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) |
4016 | writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); | |
4017 | ||
f1489653 | 4018 | /* initialize descriptor rings */ |
d81c0983 | 4019 | set_bufsize(dev); |
1da177e4 LT |
4020 | oom = nv_init_ring(dev); |
4021 | ||
4022 | writel(0, base + NvRegLinkSpeed); | |
5070d340 | 4023 | writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); |
1da177e4 LT |
4024 | nv_txrx_reset(dev); |
4025 | writel(0, base + NvRegUnknownSetupReg6); | |
4026 | ||
4027 | np->in_shutdown = 0; | |
4028 | ||
f1489653 | 4029 | /* give hw rings */ |
0832b25a | 4030 | setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); |
eafa59f6 | 4031 | writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), |
1da177e4 LT |
4032 | base + NvRegRingSizes); |
4033 | ||
1da177e4 | 4034 | writel(np->linkspeed, base + NvRegLinkSpeed); |
95d161cb AA |
4035 | if (np->desc_ver == DESC_VER_1) |
4036 | writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); | |
4037 | else | |
4038 | writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark); | |
8a4ae7f2 | 4039 | writel(np->txrxctl_bits, base + NvRegTxRxControl); |
ee407b02 | 4040 | writel(np->vlanctl_bits, base + NvRegVlanControl); |
1da177e4 | 4041 | pci_push(base); |
8a4ae7f2 | 4042 | writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); |
1da177e4 LT |
4043 | reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, |
4044 | NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, | |
4045 | KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); | |
4046 | ||
4047 | writel(0, base + NvRegUnknownSetupReg4); | |
4048 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
4049 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | |
4050 | ||
1da177e4 LT |
4051 | writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); |
4052 | writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); | |
4053 | writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); | |
d81c0983 | 4054 | writel(np->rx_buf_sz, base + NvRegOffloadConfig); |
1da177e4 LT |
4055 | |
4056 | writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus); | |
4057 | get_random_bytes(&i, sizeof(i)); | |
4058 | writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed); | |
9744e218 AA |
4059 | writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral); |
4060 | writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral); | |
a971c324 AA |
4061 | if (poll_interval == -1) { |
4062 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) | |
4063 | writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); | |
4064 | else | |
4065 | writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); | |
4066 | } | |
4067 | else | |
4068 | writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); | |
1da177e4 LT |
4069 | writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); |
4070 | writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, | |
4071 | base + NvRegAdapterControl); | |
4072 | writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed); | |
4073 | writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4); | |
c42d9df9 AA |
4074 | if (np->wolenabled) |
4075 | writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags); | |
1da177e4 LT |
4076 | |
4077 | i = readl(base + NvRegPowerState); | |
4078 | if ((i & NVREG_POWERSTATE_POWEREDUP) == 0) | |
4079 | writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); | |
4080 | ||
4081 | pci_push(base); | |
4082 | udelay(10); | |
4083 | writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); | |
4084 | ||
84b3932b | 4085 | nv_disable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
4086 | pci_push(base); |
4087 | writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); | |
4088 | writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); | |
4089 | pci_push(base); | |
4090 | ||
9589c77a | 4091 | if (nv_request_irq(dev, 0)) { |
84b3932b | 4092 | goto out_drain; |
d33a73c8 | 4093 | } |
1da177e4 LT |
4094 | |
4095 | /* ask for interrupts */ | |
84b3932b | 4096 | nv_enable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
4097 | |
4098 | spin_lock_irq(&np->lock); | |
4099 | writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); | |
4100 | writel(0, base + NvRegMulticastAddrB); | |
4101 | writel(0, base + NvRegMulticastMaskA); | |
4102 | writel(0, base + NvRegMulticastMaskB); | |
4103 | writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); | |
4104 | /* One manual link speed update: Interrupts are enabled, future link | |
4105 | * speed changes cause interrupts and are handled by nv_link_irq(). | |
4106 | */ | |
4107 | { | |
4108 | u32 miistat; | |
4109 | miistat = readl(base + NvRegMIIStatus); | |
4110 | writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); | |
4111 | dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat); | |
4112 | } | |
1b1b3c9b MS |
4113 | /* set linkspeed to invalid value, thus force nv_update_linkspeed |
4114 | * to init hw */ | |
4115 | np->linkspeed = 0; | |
1da177e4 LT |
4116 | ret = nv_update_linkspeed(dev); |
4117 | nv_start_rx(dev); | |
4118 | nv_start_tx(dev); | |
4119 | netif_start_queue(dev); | |
e27cdba5 SH |
4120 | netif_poll_enable(dev); |
4121 | ||
1da177e4 LT |
4122 | if (ret) { |
4123 | netif_carrier_on(dev); | |
4124 | } else { | |
4125 | printk("%s: no link during initialization.\n", dev->name); | |
4126 | netif_carrier_off(dev); | |
4127 | } | |
4128 | if (oom) | |
4129 | mod_timer(&np->oom_kick, jiffies + OOM_REFILL); | |
52da3578 AA |
4130 | |
4131 | /* start statistics timer */ | |
4132 | if (np->driver_data & DEV_HAS_STATISTICS) | |
4133 | mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); | |
4134 | ||
1da177e4 LT |
4135 | spin_unlock_irq(&np->lock); |
4136 | ||
4137 | return 0; | |
4138 | out_drain: | |
4139 | drain_ring(dev); | |
4140 | return ret; | |
4141 | } | |
4142 | ||
4143 | static int nv_close(struct net_device *dev) | |
4144 | { | |
ac9c1897 | 4145 | struct fe_priv *np = netdev_priv(dev); |
1da177e4 LT |
4146 | u8 __iomem *base; |
4147 | ||
4148 | spin_lock_irq(&np->lock); | |
4149 | np->in_shutdown = 1; | |
4150 | spin_unlock_irq(&np->lock); | |
e27cdba5 | 4151 | netif_poll_disable(dev); |
1da177e4 LT |
4152 | synchronize_irq(dev->irq); |
4153 | ||
4154 | del_timer_sync(&np->oom_kick); | |
4155 | del_timer_sync(&np->nic_poll); | |
52da3578 | 4156 | del_timer_sync(&np->stats_poll); |
1da177e4 LT |
4157 | |
4158 | netif_stop_queue(dev); | |
4159 | spin_lock_irq(&np->lock); | |
4160 | nv_stop_tx(dev); | |
4161 | nv_stop_rx(dev); | |
4162 | nv_txrx_reset(dev); | |
4163 | ||
4164 | /* disable interrupts on the nic or we will lock up */ | |
4165 | base = get_hwbase(dev); | |
84b3932b | 4166 | nv_disable_hw_interrupts(dev, np->irqmask); |
1da177e4 LT |
4167 | pci_push(base); |
4168 | dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); | |
4169 | ||
4170 | spin_unlock_irq(&np->lock); | |
4171 | ||
84b3932b | 4172 | nv_free_irq(dev); |
1da177e4 LT |
4173 | |
4174 | drain_ring(dev); | |
4175 | ||
4176 | if (np->wolenabled) | |
4177 | nv_start_rx(dev); | |
4178 | ||
4179 | /* FIXME: power down nic */ | |
4180 | ||
4181 | return 0; | |
4182 | } | |
4183 | ||
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_device failed (%d) for device %s\n",
				err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS))
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

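	/* Chips with VLAN, MSI-X, power control or statistics support expose
	 * a larger register window; the BAR scan below only accepts a memory
	 * resource that is at least register_size bytes long.
	 */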
	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
		       pci_name(pci_dev));
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
				       pci_name(pci_dev));
			} else {
				dev->features |= NETIF_F_HIGHDMA;
				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
			}
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
				       pci_name(pci_dev));
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

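	/* Note: although descriptor format 3 is described above as supporting
	 * 40-bit addressing, the DMA masks requested are DMA_39BIT_MASK -
	 * presumably a deliberately conservative choice by the driver.
	 */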
	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
#ifdef NETIF_F_TSO
		dev->features |= NETIF_F_TSO;
#endif
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
	}

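	/* MSI and MSI-X are only marked capable below if both the chip
	 * advertises them in driver_data and the corresponding msi/msix
	 * module parameter is set. Rx pause is requested by default; tx
	 * pause only on chips with DEV_HAS_PAUSEFRAME_TX.
	 */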
	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

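	/* The rx and tx descriptor rings are carved out of one consistent
	 * DMA allocation: the rx ring comes first and the tx ring starts at
	 * index rx_ring_size. v1/v2 chips use struct ring_desc, v3 chips the
	 * larger struct ring_desc_ex.
	 */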
	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;
	np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
	np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
	np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
	np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
	np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
	np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
	if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
		goto out_freering;
	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);

	dev->open = nv_open;
	dev->stop = nv_close;
	dev->hard_start_xmit = nv_start_xmit;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
	dev->weight = 64;
#ifdef CONFIG_FORCEDETH_NAPI
	dev->poll = nv_napi_poll;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

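	/* The MAC address registers may hold the address in reversed byte
	 * order, depending on a workaround bit in NvRegTransmitPoll. If the
	 * bit is clear, the address is byte-swapped, orig_mac is rewritten
	 * to the corrected value and the bit is set, so later reads (and the
	 * write-back in nv_remove) stay consistent.
	 */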
	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
		/* set permanent address to be correct as well */
		np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
		np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid MAC address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
		       pci_name(pci_dev),
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		u8 revision_id;
		pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    revision_id >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

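	/* Throughput mode requests three MSI-X vectors (presumably so that
	 * rx, tx and other events land on separate vectors), while the
	 * timer-driven CPU mode makes do with a single vector.
	 */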
	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

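	/* The PHY scan below runs i = 1..32 with phyaddr = i & 0x1f, so
	 * addresses 1-31 are probed first and address 0 last; i == 33 after
	 * the loop means no PHY answered.
	 */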
	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}

	/* reset it */
	phy_init(dev);

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
	       dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
	       pci_name(pci_dev));

	return 0;

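	/* Error unwinding: each label below undoes one acquisition step and
	 * falls through to the next, so jumping to a label releases that
	 * resource and everything acquired before it.
	 */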
out_error:
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

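/*
 * PCI device table: the driver_data bitmask records each chip's
 * capabilities (DEV_HAS_*) and required workarounds (DEV_NEED_*);
 * nv_probe consults these flags to select the descriptor version and
 * optional features.
 */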
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
	},
	{0,},
};

static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
};

static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt is generated: [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");

MODULE_AUTHOR("Manfred Spraul <[email protected]>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);
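
/*
 * Example (hypothetical invocation, values chosen for illustration):
 * loading the module with MSI enabled and the timer-driven CPU
 * optimization mode selected, using the parameters declared above:
 *
 *	modprobe forcedeth optimization_mode=1 msi=1
 */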