1 | /* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */ |
2 | /* | |
3 | Written 1996-1999 by Donald Becker. | |
4 | ||
5 | This software may be used and distributed according to the terms | |
6 | of the GNU General Public License, incorporated herein by reference. | |
7 | ||
8 | This driver is for the 3Com "Vortex" and "Boomerang" series ethercards. | |
9 | Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597 | |
10 | and the EtherLink XL 3c900 and 3c905 cards. | |
11 | ||
12 | Problem reports and questions should be directed to | |
13 | [email protected] | |
14 | ||
15 | The author may be reached as [email protected], or C/O | |
16 | Scyld Computing Corporation | |
17 | 410 Severn Ave., Suite 210 | |
18 | Annapolis MD 21403 | |
19 | ||
20 | Linux Kernel Additions: | |
21 | ||
22 | 0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates | |
23 | 0.99H+lk1.0 - Jeff Garzik <[email protected]> | |
24 | Remove compatibility defines for kernel versions < 2.2.x. | |
25 | Update for new 2.3.x module interface | |
26 | LK1.1.2 (March 19, 2000) | |
27 | * New PCI interface (jgarzik) | |
28 | ||
29 | LK1.1.3 25 April 2000, Andrew Morton <[email protected]> | |
30 | - Merged with 3c575_cb.c | |
31 | - Don't set RxComplete in boomerang interrupt enable reg | |
32 | - spinlock in vortex_timer to protect mdio functions | |
33 | - disable local interrupts around call to vortex_interrupt in | |
34 | vortex_tx_timeout() (So vortex_interrupt can use spin_lock()) | |
35 | - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl | |
36 | - In vortex_start_xmit(), move the lock to _after_ we've altered | |
37 | vp->cur_tx and vp->tx_full. This defeats the race between | |
38 | vortex_start_xmit() and vortex_interrupt which was identified | |
39 | by Bogdan Costescu. | |
40 | - Merged back support for six new cards from various sources | |
41 | - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus | |
42 | insertion oops) | |
43 | - Tell it that 3c905C has NWAY for 100bT autoneg | |
44 | - Fix handling of SetStatusEnd in 'Too much work..' code, as | |
45 | per 2.3.99's 3c575_cb (Dave Hinds). | |
46 | - Split ISR into two for vortex & boomerang | |
47 | - Fix MOD_INC/DEC races | |
48 | - Handle resource allocation failures. | |
49 | - Fix 3CCFE575CT LED polarity | |
50 | - Make tx_interrupt_mitigation the default | |
51 | ||
52 | LK1.1.4 25 April 2000, Andrew Morton <[email protected]> | |
53 | - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs. | |
54 | - Put vortex_info_tbl into __devinitdata | |
55 | - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well | |
56 | as in the hardware. | |
57 | - Increased the loop counter in issue_and_wait from 2,000 to 4,000. | |
58 | ||
59 | LK1.1.5 28 April 2000, andrewm | |
60 | - Added powerpc defines (John Daniel <[email protected]> said these work...) | |
61 | - Some extra diagnostics | |
62 | - In vortex_error(), reset the Tx on maxCollisions. Otherwise most | |
63 | chips usually get a Tx timeout. | |
64 | - Added extra_reset module parm | |
65 | - Replaced some inline timer manip with mod_timer | |
66 | (François Romieu <[email protected]>) |
67 | - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway | |
68 | (this came across from 3c575_cb). | |
69 | ||
70 | LK1.1.6 06 Jun 2000, andrewm | |
71 | - Backed out the PPC defines. | |
72 | - Use del_timer_sync(), mod_timer(). | |
73 | - Fix wrapped ulong comparison in boomerang_rx() | |
74 | - Add IS_TORNADO, use it to suppress 3c905C checksum error msg | |
75 | (Donald Becker, I Lee Hetherington <[email protected]>) | |
76 | - Replace union wn3_config with BFINS/BFEXT manipulation for | |
77 | sparc64 (Pete Zaitcev, Peter Jones) | |
78 | - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex): | |
79 | do a netif_wake_queue() to better recover from errors. (Anders Pedersen, | |
80 | Donald Becker) | |
81 | - Print a warning on out-of-memory (rate limited to 1 per 10 secs) | |
82 | - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland) | |
83 | ||
84 | LK1.1.7 2 Jul 2000 andrewm | |
85 | - Better handling of shared IRQs | |
86 | - Reset the transmitter on a Tx reclaim error | |
87 | - Fixed crash under OOM during vortex_open() (Mark Hemment) | |
88 | - Fix Rx cessation problem during OOM (help from Mark Hemment) | |
89 | - The spinlocks around the mdio access were blocking interrupts for 300uS. | |
90 | Fix all this to use spin_lock_bh() within mdio_read/write | |
91 | - Only write to TxFreeThreshold if it's a boomerang - other NICs don't | |
92 | have one. | |
93 | - Added 802.3x MAC-layer flow control support | |
94 | ||
95 | LK1.1.8 13 Aug 2000 andrewm | |
96 | - Ignore request_region() return value - already reserved if Cardbus. | |
97 | - Merged some additional Cardbus flags from Don's 0.99Qk | |
98 | - Some fixes for 3c556 (Fred Maciel) | |
99 | - Fix for EISA initialisation (Jan Rekorajski) | |
100 | - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers | |
101 | - Fixed MII_XCVR_PWR for 3CCFE575CT | |
102 | - Added INVERT_LED_PWR, used it. | |
103 | - Backed out the extra_reset stuff | |
104 | ||
105 | LK1.1.9 12 Sep 2000 andrewm | |
106 | - Backed out the tx_reset_resume flags. It was a no-op. | |
107 | - In vortex_error, don't reset the Tx on txReclaim errors | |
108 | - In vortex_error, don't reset the Tx on maxCollisions errors. | |
109 | Hence backed out all the DownListPtr logic here. | |
110 | - In vortex_error, give Tornado cards a partial TxReset on | |
111 | maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this. | |
112 | - Redid some driver flags and device names based on pcmcia_cs-3.1.20. | |
113 | - Fixed a bug where, if vp->tx_full is set when the interface | |
114 | is downed, it remains set when the interface is upped. Bad | |
115 | things happen. | |
116 | ||
117 | LK1.1.10 17 Sep 2000 andrewm | |
118 | - Added EEPROM_8BIT for 3c555 (Fred Maciel) | |
119 | - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg) | |
120 | - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO" | |
121 | ||
122 | LK1.1.11 13 Nov 2000 andrewm | |
123 | - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER | |
124 | ||
125 | LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1) | |
126 | - Call pci_enable_device before we request our IRQ (Tobias Ringstrom) | |
127 | - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra) | |
128 | - Added extended issue_and_wait for the 3c905CX. | |
129 | - Look for an MII on PHY index 24 first (3c905CX oddity). | |
130 | - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger) | |
131 | - Don't free skbs we don't own on oom path in vortex_open(). | |
132 | ||
133 | LK1.1.13 27 Jan 2001 | |
134 | - Added explicit `medialock' flag so we can truly | |
135 | lock the media type down with `options'. | |
136 | - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <[email protected]>) | |
137 | - Added and used EEPROM_NORESET for 3c556B PM resumes. | |
138 | - Fixed leakage of vp->rx_ring. | |
139 | - Break out separate HAS_HWCKSM device capability flag. | |
140 | - Kill vp->tx_full (ANK) | |
141 | - Merge zerocopy fragment handling (ANK?) | |
142 | ||
143 | LK1.1.14 15 Feb 2001 | |
144 | - Enable WOL. Can be turned on with `enable_wol' module option. | |
145 | - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul) | |
146 | - If a device's internalconfig register reports it has NWAY, | |
147 | use it, even if autoselect is enabled. | |
148 | ||
149 | LK1.1.15 6 June 2001 akpm | |
150 | - Prevent double counting of received bytes (Lars Christensen) | |
151 | - Add ethtool support (jgarzik) | |
152 | - Add module parm descriptions (Andrzej M. Krzysztofowicz) | |
153 | - Implemented alloc_etherdev() API | |
154 | - Special-case the 'Tx error 82' message. | |
155 | ||
156 | LK1.1.16 18 July 2001 akpm | |
157 | - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM | |
158 | - Lessen verbosity of bootup messages | |
159 | - Fix WOL - use new PM API functions. | |
160 | - Use netif_running() instead of vp->open in suspend/resume. | |
161 | - Don't reset the interface logic on open/close/rmmod. It upsets | |
162 | autonegotiation, and hence DHCP (from 0.99T). | |
163 | - Back out EEPROM_NORESET flag because of the above (we do it for all | |
164 | NICs). | |
165 | - Correct 3c982 identification string | |
166 | - Rename wait_for_completion() to issue_and_wait() to avoid completion.h | |
167 | clash. | |
168 | ||
169 | LK1.1.17 18Dec01 akpm | |
170 | - PCI ID 9805 is a Python-T, not a dual-port Cyclone. Apparently. | |
171 | And it has NWAY. | |
172 | - Mask our advertised modes (vp->advertising) with our capabilities | |
173 | (MII reg5) when deciding which duplex mode to use. | |
174 | - Add `global_options' as default for options[]. Ditto global_enable_wol, | |
175 | global_full_duplex. | |
176 | ||
177 | LK1.1.18 01Jul02 akpm | |
178 | - Fix for undocumented transceiver power-up bit on some 3c566B's | |
179 | (Donald Becker, Rahul Karnik) | |
180 | ||
181 | - See http://www.zip.com.au/~akpm/linux/#3c59x-2.3 for more details. | |
182 | - Also see Documentation/networking/vortex.txt | |
183 | ||
184 | LK1.1.19 10Nov02 Marc Zyngier <[email protected]> | |
185 | - EISA sysfs integration. | |
186 | */ | |
187 | ||
188 | /* | |
189 | * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation | |
190 | * as well as other drivers | |
191 | * | |
192 | * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k | |
193 | * due to dead code elimination. There will be some performance benefits from this due to | |
194 | * elimination of all the tests and reduced cache footprint. | |
195 | */ | |
196 | ||
197 | ||
198 | #define DRV_NAME "3c59x" | |
199 | #define DRV_VERSION "LK1.1.19" | |
200 | #define DRV_RELDATE "10 Nov 2002" | |
201 | ||
202 | ||
203 | ||
204 | /* A few values that may be tweaked. */ | |
205 | /* Keep the ring sizes a power of two for efficiency. */ | |
206 | #define TX_RING_SIZE 16 | |
207 | #define RX_RING_SIZE 32 | |
208 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | |
209 | ||
210 | /* "Knobs" that adjust features and parameters. */ | |
211 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | |
212 | Setting to > 1512 effectively disables this feature. */ | |
213 | #ifndef __arm__ | |
214 | static int rx_copybreak = 200; | |
215 | #else | |
216 | /* ARM systems perform better by disregarding the bus-master | |
217 | transfer capability of these cards. -- rmk */ | |
218 | static int rx_copybreak = 1513; | |
219 | #endif | |
220 | /* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */ | |
221 | static const int mtu = 1500; | |
222 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | |
223 | static int max_interrupt_work = 32; | |
224 | /* Tx timeout interval (millisecs) */ | |
225 | static int watchdog = 5000; | |
226 | ||
227 | /* Allow aggregation of Tx interrupts. Saves CPU load at the cost | |
228 | * of possible Tx stalls if the system is blocking interrupts | |
229 | * somewhere else. Undefine this to disable. | |
230 | */ | |
231 | #define tx_interrupt_mitigation 1 | |
232 | ||
233 | /* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */ | |
234 | #define vortex_debug debug | |
235 | #ifdef VORTEX_DEBUG | |
236 | static int vortex_debug = VORTEX_DEBUG; | |
237 | #else | |
238 | static int vortex_debug = 1; | |
239 | #endif | |
240 | ||
241 | #include <linux/config.h> | |
242 | #include <linux/module.h> | |
243 | #include <linux/kernel.h> | |
244 | #include <linux/string.h> | |
245 | #include <linux/timer.h> | |
246 | #include <linux/errno.h> | |
247 | #include <linux/in.h> | |
248 | #include <linux/ioport.h> | |
249 | #include <linux/slab.h> | |
250 | #include <linux/interrupt.h> | |
251 | #include <linux/pci.h> | |
252 | #include <linux/mii.h> | |
253 | #include <linux/init.h> | |
254 | #include <linux/netdevice.h> | |
255 | #include <linux/etherdevice.h> | |
256 | #include <linux/skbuff.h> | |
257 | #include <linux/ethtool.h> | |
258 | #include <linux/highmem.h> | |
259 | #include <linux/eisa.h> | |
260 | #include <linux/bitops.h> | |
261 | #include <asm/irq.h> /* For NR_IRQS only. */ | |
262 | #include <asm/io.h> | |
263 | #include <asm/uaccess.h> | |
264 | ||
265 | /* Kernel compatibility defines, some common to David Hinds' PCMCIA package. | |
266 | This is only in the support-all-kernels source code. */ | |
267 | ||
268 | #define RUN_AT(x) (jiffies + (x)) | |
269 | ||
270 | #include <linux/delay.h> | |
271 | ||
272 | ||
273 | static char version[] __devinitdata = | |
274 | DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n"; | |
275 | ||
276 | MODULE_AUTHOR("Donald Becker <[email protected]>"); | |
277 | MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver " | |
278 | DRV_VERSION " " DRV_RELDATE); | |
279 | MODULE_LICENSE("GPL"); | |
280 | MODULE_VERSION(DRV_VERSION); | |
281 | ||
282 | ||
283 | /* Operational parameters that are not usually changed. */ |
284 | ||
285 | /* The Vortex size is twice that of the original EtherLinkIII series: the | |
286 | runtime register window, window 1, is now always mapped in. | |
287 | The Boomerang size is twice as large as the Vortex -- it has additional | |
288 | bus master control registers. */ | |
289 | #define VORTEX_TOTAL_SIZE 0x20 | |
290 | #define BOOMERANG_TOTAL_SIZE 0x40 | |
291 | ||
292 | /* Set iff a MII transceiver on any interface requires mdio preamble. | |
293 | This is only set with the original DP83840 on older 3c905 boards, so the extra |
294 | code size of a per-interface flag is not worthwhile. */ | |
295 | static char mii_preamble_required; | |
296 | ||
297 | #define PFX DRV_NAME ": " | |
298 | ||
299 | ||
300 | ||
301 | /* | |
302 | Theory of Operation | |
303 | ||
304 | I. Board Compatibility | |
305 | ||
306 | This device driver is designed for the 3Com FastEtherLink and FastEtherLink | |
307 | XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbps |
308 | versions of the FastEtherLink cards. The supported product IDs are | |
309 | 3c590, 3c592, 3c595, 3c597, 3c900, 3c905 | |
310 | ||
311 | The related ISA 3c515 is supported with a separate driver, 3c515.c, included | |
312 | with the kernel source or available from | |
313 | cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html | |
314 | ||
315 | II. Board-specific settings | |
316 | ||
317 | PCI bus devices are configured by the system at boot time, so no jumpers | |
318 | need to be set on the board. The system BIOS should be set to assign the | |
319 | PCI INTA signal to an otherwise unused system IRQ line. | |
320 | ||
321 | The EEPROM settings for media type and forced-full-duplex are observed. | |
322 | The EEPROM media type should be left at the default "autoselect" unless using | |
323 | 10base2 or AUI connections which cannot be reliably detected. | |
324 | ||
325 | III. Driver operation | |
326 | ||
327 | The 3c59x series use an interface that's very similar to the previous 3c5x9 | |
328 | series. The primary interface is two programmed-I/O FIFOs, with an | |
329 | alternate single-contiguous-region bus-master transfer (see next). | |
330 | ||
331 | The 3c900 "Boomerang" series uses a full-bus-master interface with separate | |
332 | lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, | |
333 | DEC Tulip and Intel Speedo3. The first chip version retains a compatible | |
334 | programmed-I/O interface that has been removed in 'B' and subsequent board | |
335 | revisions. | |
336 | ||
337 | One extension that is advertised in a very large font is that the adapters | |
338 | are capable of being bus masters. On the Vortex chip this capability was | |
339 | only for a single contiguous region making it far less useful than the full | |
340 | bus master capability. There is a significant performance impact of taking | |
341 | an extra interrupt or polling for the completion of each transfer, as well | |
342 | as difficulty sharing the single transfer engine between the transmit and | |
343 | receive threads. Using DMA transfers is a win only with large blocks or | |
344 | with the flawed versions of the Intel Orion motherboard PCI controller. | |
345 | ||
346 | The Boomerang chip's full-bus-master interface is useful, and has the | |
347 | currently-unused advantages over other similar chips that queued transmit | |
348 | packets may be reordered and receive buffer groups are associated with a | |
349 | single frame. | |
350 | ||
351 | With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme. | |
352 | Rather than a fixed intermediate receive buffer, this scheme allocates | |
353 | full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as | |
354 | the copying breakpoint: it is chosen to trade-off the memory wasted by | |
355 | passing the full-sized skbuff to the queue layer for all frames vs. the | |
356 | copying cost of copying a frame to a correctly-sized skbuff (see the illustrative sketch following this comment block). |
357 | ||
358 | IIIC. Synchronization | |
359 | The driver runs as two independent, single-threaded flows of control. One | |
360 | is the send-packet routine, which enforces single-threaded use by the | |
361 | dev->tbusy flag. The other thread is the interrupt handler, which is single | |
362 | threaded by the hardware and other software. | |
363 | ||
364 | IV. Notes | |
365 | ||
366 | Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development | |
367 | 3c590, 3c595, and 3c900 boards. | |
368 | The name "Vortex" is the internal 3Com project name for the PCI ASIC, and | |
369 | the EISA version is called "Demon". According to Terry these names come | |
370 | from rides at the local amusement park. | |
371 | ||
372 | The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes! | |
373 | This driver only supports ethernet packets because of the skbuff allocation | |
374 | limit of 4K. | |
375 | */ | |
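/*
 * Illustrative sketch only (not built): the rx_copybreak decision
 * described in "III. Driver operation" above.  Frames shorter than
 * rx_copybreak are copied into a freshly allocated, exactly sized
 * skbuff so the full-sized buffer can stay in the receive ring; the
 * helper name and the surrounding ring/DMA bookkeeping are assumed
 * here for clarity.
 */
#if 0
static struct sk_buff *example_rx_copybreak(const unsigned char *rx_buf,
					    int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len >= rx_copybreak)
		return NULL;		/* caller passes the ring skbuff upstream */
	skb = dev_alloc_skb(pkt_len + 2);
	if (skb == NULL)
		return NULL;		/* fall back to the full-sized skbuff */
	skb_reserve(skb, 2);		/* align the IP header after the 14-byte MAC header */
	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
	return skb;			/* the ring buffer is left in place for reuse */
}
#endif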
376 | ||
377 | /* This table drives the PCI probe routines. It's mostly boilerplate in all | |
378 | of the drivers, and will likely be provided by some future kernel. | |
379 | */ | |
380 | enum pci_flags_bit { | |
381 | PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, | |
382 | PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3, | |
383 | }; | |
384 | ||
385 | enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8, | |
386 | EEPROM_8BIT=0x10, /* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */ | |
387 | HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100, | |
388 | INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800, | |
389 | EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000, | |
390 | EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, }; | |
391 | ||
392 | enum vortex_chips { | |
393 | CH_3C590 = 0, | |
394 | CH_3C592, | |
395 | CH_3C597, | |
396 | CH_3C595_1, | |
397 | CH_3C595_2, | |
398 | ||
399 | CH_3C595_3, | |
400 | CH_3C900_1, | |
401 | CH_3C900_2, | |
402 | CH_3C900_3, | |
403 | CH_3C900_4, | |
404 | ||
405 | CH_3C900_5, | |
406 | CH_3C900B_FL, | |
407 | CH_3C905_1, | |
408 | CH_3C905_2, | |
409 | CH_3C905B_1, | |
410 | ||
411 | CH_3C905B_2, | |
412 | CH_3C905B_FX, | |
413 | CH_3C905C, | |
414 | CH_3C9202, | |
415 | CH_3C980, | |
416 | CH_3C9805, | |
417 | ||
418 | CH_3CSOHO100_TX, | |
419 | CH_3C555, | |
420 | CH_3C556, | |
421 | CH_3C556B, | |
422 | CH_3C575, | |
423 | ||
424 | CH_3C575_1, | |
425 | CH_3CCFE575, | |
426 | CH_3CCFE575CT, | |
427 | CH_3CCFE656, | |
428 | CH_3CCFEM656, | |
429 | ||
430 | CH_3CCFEM656_1, | |
431 | CH_3C450, | |
432 | CH_3C920, | |
433 | CH_3C982A, | |
434 | CH_3C982B, | |
435 | ||
436 | CH_905BT4, | |
437 | CH_920B_EMB_WNM, | |
438 | }; | |
439 | ||
440 | ||
441 | /* note: this array is directly indexed by the enums above, and MUST |
442 | * be kept in sync with both the enums above, and the PCI device | |
443 | * table below | |
444 | */ | |
445 | static struct vortex_chip_info { | |
446 | const char *name; | |
447 | int flags; | |
448 | int drv_flags; | |
449 | int io_size; | |
450 | } vortex_info_tbl[] __devinitdata = { | |
451 | {"3c590 Vortex 10Mbps", | |
452 | PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, | |
453 | {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ | |
454 | PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, | |
455 | {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ | |
456 | PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, | |
457 | {"3c595 Vortex 100baseTx", | |
458 | PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, | |
459 | {"3c595 Vortex 100baseT4", | |
460 | PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, | |
461 | ||
462 | {"3c595 Vortex 100base-MII", | |
463 | PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, }, | |
464 | {"3c900 Boomerang 10baseT", | |
465 | PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, | |
466 | {"3c900 Boomerang 10Mbps Combo", | |
467 | PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, }, | |
468 | {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */ | |
469 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, | |
470 | {"3c900 Cyclone 10Mbps Combo", | |
471 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, | |
472 | ||
473 | {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */ | |
474 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, | |
475 | {"3c900B-FL Cyclone 10base-FL", | |
476 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, | |
477 | {"3c905 Boomerang 100baseTx", | |
478 | PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, | |
479 | {"3c905 Boomerang 100baseT4", | |
480 | PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, | |
481 | {"3c905B Cyclone 100baseTx", | |
482 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, | |
483 | ||
484 | {"3c905B Cyclone 10/100/BNC", | |
485 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, | |
486 | {"3c905B-FX Cyclone 100baseFx", | |
487 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, | |
488 | {"3c905C Tornado", | |
489 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, | |
490 | {"3c920B-EMB-WNM (ATI Radeon 9100 IGP)", | |
491 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, }, | |
492 | {"3c980 Cyclone", | |
493 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, }, | |
494 | ||
495 | {"3c980C Python-T", | |
496 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, | |
497 | {"3cSOHO100-TX Hurricane", | |
498 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, }, | |
499 | {"3c555 Laptop Hurricane", | |
500 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, }, | |
501 | {"3c556 Laptop Tornado", | |
502 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR| | |
503 | HAS_HWCKSM, 128, }, | |
504 | {"3c556B Laptop Hurricane", | |
505 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR| | |
506 | WNO_XCVR_PWR|HAS_HWCKSM, 128, }, | |
507 | ||
508 | {"3c575 [Megahertz] 10/100 LAN CardBus", | |
509 | PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, | |
510 | {"3c575 Boomerang CardBus", | |
511 | PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, }, | |
512 | {"3CCFE575BT Cyclone CardBus", | |
513 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT| | |
514 | INVERT_LED_PWR|HAS_HWCKSM, 128, }, | |
515 | {"3CCFE575CT Tornado CardBus", | |
516 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| | |
517 | MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, | |
518 | {"3CCFE656 Cyclone CardBus", | |
519 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| | |
520 | INVERT_LED_PWR|HAS_HWCKSM, 128, }, | |
521 | ||
522 | {"3CCFEM656B Cyclone+Winmodem CardBus", | |
523 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| | |
524 | INVERT_LED_PWR|HAS_HWCKSM, 128, }, | |
525 | {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */ | |
526 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR| | |
527 | MAX_COLLISION_RESET|HAS_HWCKSM, 128, }, | |
528 | {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */ | |
529 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, | |
530 | {"3c920 Tornado", | |
531 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, | |
532 | {"3c982 Hydra Dual Port A", | |
533 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, | |
534 | ||
535 | {"3c982 Hydra Dual Port B", | |
536 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, }, | |
537 | {"3c905B-T4", | |
538 | PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, | |
539 | {"3c920B-EMB-WNM Tornado", | |
540 | PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, }, | |
541 | ||
542 | {NULL,}, /* NULL terminated list. */ | |
543 | }; | |
544 | ||
545 | ||
546 | static struct pci_device_id vortex_pci_tbl[] = { | |
547 | { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 }, | |
548 | { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 }, | |
549 | { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 }, | |
550 | { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 }, | |
551 | { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 }, | |
552 | ||
553 | { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 }, | |
554 | { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 }, | |
555 | { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 }, | |
556 | { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 }, | |
557 | { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 }, | |
558 | ||
559 | { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 }, | |
560 | { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, | |
561 | { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, | |
562 | { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, | |
563 | { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, | |
564 | ||
565 | { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, | |
566 | { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX }, | |
567 | { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C }, | |
568 | { 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 }, | |
569 | { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 }, | |
570 | { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 }, | |
571 | ||
572 | { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX }, | |
573 | { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 }, | |
574 | { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 }, | |
575 | { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B }, | |
576 | { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 }, | |
577 | ||
578 | { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 }, | |
579 | { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 }, | |
580 | { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT }, | |
581 | { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 }, | |
582 | { 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 }, | |
583 | ||
584 | { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 }, | |
585 | { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 }, | |
586 | { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 }, | |
587 | { 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A }, | |
588 | { 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B }, | |
589 | ||
590 | { 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 }, | |
591 | { 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM }, | |
592 | ||
593 | {0,} /* 0 terminated list. */ | |
594 | }; | |
595 | MODULE_DEVICE_TABLE(pci, vortex_pci_tbl); | |
596 | ||
597 | ||
598 | /* Operational definitions. | |
599 | These are not used by other compilation units and thus are not | |
600 | exported in a ".h" file. | |
601 | ||
602 | First the windows. There are eight register windows, with the command | |
603 | and status registers available in each. | |
604 | */ | |
605 | #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) | |
606 | #define EL3_CMD 0x0e | |
607 | #define EL3_STATUS 0x0e | |
608 | ||
609 | /* The top five bits written to EL3_CMD are a command, the lower | |
610 | 11 bits are the parameter, if applicable. | |
611 | Note that 11 parameter bits was fine for ethernet, but the new chip |
612 | can handle FDDI length frames (~4500 octets) and now parameters count | |
613 | 32-bit 'Dwords' rather than octets. */ | |
614 | ||
615 | enum vortex_cmd { | |
616 | TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, | |
617 | RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, | |
618 | UpStall = 6<<11, UpUnstall = (6<<11)+1, | |
619 | DownStall = (6<<11)+2, DownUnstall = (6<<11)+3, | |
620 | RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, | |
621 | FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, | |
622 | SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, | |
623 | SetTxThreshold = 18<<11, SetTxStart = 19<<11, | |
624 | StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11, | |
625 | StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,}; | |
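/*
 * Illustrative sketch only (not built): issuing a command through
 * EL3_CMD as described above -- opcode in the top five bits, parameter
 * in the low eleven -- then polling CmdInProgress (see enum
 * vortex_status below) until the chip has accepted it.  This mirrors
 * what the driver's issue_and_wait() helper does; the function name
 * and loop bound here are arbitrary.  E.g. example_do_command(ioaddr,
 * RxDiscard, 0) would discard the packet at the head of the Rx FIFO.
 */
#if 0
static void example_do_command(long ioaddr, int cmd, int param)
{
	int i;

	outw(cmd + param, ioaddr + EL3_CMD);
	for (i = 0; i < 2000; i++)
		if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
			break;
}
#endif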
626 | ||
627 | /* The SetRxFilter command accepts the following classes: */ | |
628 | enum RxFilter { | |
629 | RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; | |
630 | ||
631 | /* Bits in the general status register. */ | |
632 | enum vortex_status { | |
633 | IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004, | |
634 | TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, | |
635 | IntReq = 0x0040, StatsFull = 0x0080, | |
636 | DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10, | |
637 | DMAInProgress = 1<<11, /* DMA controller is still busy.*/ | |
638 | CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/ | |
639 | }; | |
640 | ||
641 | /* Register window 1 offsets, the window used in normal operation. | |
642 | On the Vortex this window is always mapped at offsets 0x10-0x1f. */ | |
643 | enum Window1 { | |
644 | TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, | |
645 | RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, | |
646 | TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */ | |
647 | }; | |
648 | enum Window0 { | |
649 | Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */ | |
650 | Wn0EepromData = 12, /* Window 0: EEPROM results register. */ | |
651 | IntrStatus=0x0E, /* Valid in all windows. */ | |
652 | }; | |
653 | enum Win0_EEPROM_bits { | |
654 | EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0, | |
655 | EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ | |
656 | EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ | |
657 | }; | |
658 | /* EEPROM locations. */ | |
659 | enum eeprom_offset { | |
660 | PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3, | |
661 | EtherLink3ID=7, IFXcvrIO=8, IRQLine=9, | |
662 | NodeAddr01=10, NodeAddr23=11, NodeAddr45=12, | |
663 | DriverTune=13, Checksum=15}; | |
664 | ||
665 | enum Window2 { /* Window 2. */ | |
666 | Wn2_ResetOptions=12, | |
667 | }; | |
668 | enum Window3 { /* Window 3: MAC/config bits. */ | |
669 | Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8, | |
670 | }; | |
671 | ||
672 | #define BFEXT(value, offset, bitcount) \ | |
673 | ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1)) | |
674 | ||
675 | #define BFINS(lhs, rhs, offset, bitcount) \ | |
676 | (((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) | \ | |
677 | (((rhs) & ((1 << (bitcount)) - 1)) << (offset))) | |
678 | ||
679 | #define RAM_SIZE(v) BFEXT(v, 0, 3) | |
680 | #define RAM_WIDTH(v) BFEXT(v, 3, 1) | |
681 | #define RAM_SPEED(v) BFEXT(v, 4, 2) | |
682 | #define ROM_SIZE(v) BFEXT(v, 6, 2) | |
683 | #define RAM_SPLIT(v) BFEXT(v, 16, 2) | |
684 | #define XCVR(v) BFEXT(v, 20, 4) | |
685 | #define AUTOSELECT(v) BFEXT(v, 24, 1) | |
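/*
 * Illustrative sketch only (not built): using the BFEXT()-based
 * accessors above to pick apart the 32-bit InternalConfig (Wn3_Config)
 * register.  Window 3 must be selected first; the probe code performs
 * the real version of this when choosing the default transceiver.
 */
#if 0
static void example_parse_wn3_config(long ioaddr)
{
	unsigned int config;

	EL3WINDOW(3);
	config = inl(ioaddr + Wn3_Config);
	printk(KERN_DEBUG "xcvr %d, autoselect %d, RAM size code %d\n",
	       (int)XCVR(config), (int)AUTOSELECT(config),
	       (int)RAM_SIZE(config));
}
#endif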
686 | ||
687 | enum Window4 { /* Window 4: Xcvr/media bits. */ | |
688 | Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, | |
689 | }; | |
690 | enum Win4_Media_bits { | |
691 | Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */ | |
692 | Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */ | |
693 | Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */ | |
694 | Media_LnkBeat = 0x0800, | |
695 | }; | |
696 | enum Window7 { /* Window 7: Bus Master control. */ | |
697 | Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6, | |
698 | Wn7_MasterStatus = 12, | |
699 | }; | |
700 | /* Boomerang bus master control registers. */ | |
701 | enum MasterCtrl { | |
702 | PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c, | |
703 | TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38, | |
704 | }; | |
705 | ||
706 | /* The Rx and Tx descriptor lists. | |
707 | Caution Alpha hackers: these types are 32 bits! Note also the 8 byte | |
708 | alignment constraint on tx_ring[] and rx_ring[]. */ |
709 | #define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */ | |
710 | #define DN_COMPLETE 0x00010000 /* This packet has been downloaded */ | |
711 | struct boom_rx_desc { | |
712 | u32 next; /* Last entry points to 0. */ | |
713 | s32 status; | |
714 | u32 addr; /* Up to 63 addr/len pairs possible. */ | |
715 | s32 length; /* Set LAST_FRAG to indicate last pair. */ | |
716 | }; | |
717 | /* Values for the Rx status entry. */ | |
718 | enum rx_desc_status { | |
719 | RxDComplete=0x00008000, RxDError=0x4000, | |
720 | /* See boomerang_rx() for actual error bits */ | |
721 | IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27, | |
722 | IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31, | |
723 | }; | |
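/*
 * Illustrative sketch only (not built): linking boom_rx_desc entries
 * into the circular upload (receive) ring walked by the bus-master
 * engine.  Skbuff allocation and pci_map_single() of each buffer are
 * omitted; the real initialisation happens when the interface is
 * brought up (vortex_open()).
 */
#if 0
static void example_init_rx_ring(struct boom_rx_desc *ring, dma_addr_t ring_dma)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		ring[i].next = cpu_to_le32(ring_dma +
				sizeof(struct boom_rx_desc) * (i + 1));
		ring[i].status = 0;	/* nothing received here yet */
		ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
		/* ring[i].addr = cpu_to_le32(pci_map_single(...)); */
	}
	/* Wrap the ring: the last entry points back at the first. */
	ring[i - 1].next = cpu_to_le32(ring_dma);
}
#endif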
724 | ||
725 | #ifdef MAX_SKB_FRAGS | |
726 | #define DO_ZEROCOPY 1 | |
727 | #else | |
728 | #define DO_ZEROCOPY 0 | |
729 | #endif | |
730 | ||
731 | struct boom_tx_desc { | |
732 | u32 next; /* Last entry points to 0. */ | |
733 | s32 status; /* bits 0:12 length, others see below. */ | |
734 | #if DO_ZEROCOPY | |
735 | struct { | |
736 | u32 addr; | |
737 | s32 length; | |
738 | } frag[1+MAX_SKB_FRAGS]; | |
739 | #else | |
740 | u32 addr; | |
741 | s32 length; | |
742 | #endif | |
743 | }; | |
744 | ||
745 | /* Values for the Tx status entry. */ | |
746 | enum tx_desc_status { | |
747 | CRCDisable=0x2000, TxDComplete=0x8000, | |
748 | AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000, | |
749 | TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */ | |
750 | }; | |
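/*
 * Illustrative sketch only (not built): describing one outgoing frame
 * with a boom_tx_desc, assuming the simple non-zerocopy layout (a
 * single addr/length pair).  TxIntrUploaded requests an interrupt once
 * the frame has been uploaded; AddTCPChksum is only honoured by
 * HAS_HWCKSM chips.  DMA mapping and the DownListPtr handoff are left
 * out here.
 */
#if 0
static void example_fill_tx_desc(struct boom_tx_desc *td, dma_addr_t buf,
				 int len, int hw_csum)
{
	td->next = 0;				/* end of the download list */
	td->status = cpu_to_le32(len | TxIntrUploaded |
				 (hw_csum ? AddTCPChksum : 0));
	td->addr = cpu_to_le32(buf);
	td->length = cpu_to_le32(len | LAST_FRAG);	/* only fragment */
}
#endif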
751 | ||
752 | /* Chip features we care about in vp->capabilities, read from the EEPROM. */ | |
753 | enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 }; | |
754 | ||
755 | struct vortex_extra_stats { | |
756 | unsigned long tx_deferred; | |
757 | unsigned long tx_multiple_collisions; | |
758 | unsigned long rx_bad_ssd; | |
759 | }; | |
760 | ||
761 | struct vortex_private { | |
762 | /* The Rx and Tx rings should be quad-word-aligned. */ | |
763 | struct boom_rx_desc* rx_ring; | |
764 | struct boom_tx_desc* tx_ring; | |
765 | dma_addr_t rx_ring_dma; | |
766 | dma_addr_t tx_ring_dma; | |
767 | /* The addresses of transmit- and receive-in-place skbuffs. */ | |
768 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; | |
769 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | |
770 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ | |
771 | unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ | |
772 | struct net_device_stats stats; /* Generic stats */ | |
773 | struct vortex_extra_stats xstats; /* NIC-specific extra stats */ | |
774 | struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ | |
775 | dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ | |
776 | ||
777 | /* PCI configuration space information. */ | |
778 | struct device *gendev; | |
779 | char __iomem *cb_fn_base; /* CardBus function status addr space. */ | |
780 | ||
781 | /* Some values here only for performance evaluation and path-coverage */ | |
782 | int rx_nocopy, rx_copy, queued_packet, rx_csumhits; | |
783 | int card_idx; | |
784 | ||
785 | /* The remainder are related to chip state, mostly media selection. */ | |
786 | struct timer_list timer; /* Media selection timer. */ | |
787 | struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */ | |
788 | int options; /* User-settable misc. driver options. */ | |
789 | unsigned int media_override:4, /* Passed-in media type. */ | |
790 | default_media:4, /* Read from the EEPROM/Wn3_Config. */ | |
791 | full_duplex:1, force_fd:1, autoselect:1, | |
792 | bus_master:1, /* Vortex can only do a fragment bus-m. */ | |
793 | full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */ | |
794 | flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */ | |
795 | partner_flow_ctrl:1, /* Partner supports flow control */ | |
796 | has_nway:1, | |
797 | enable_wol:1, /* Wake-on-LAN is enabled */ | |
798 | pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ | |
799 | open:1, | |
800 | medialock:1, | |
801 | must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ | |
802 | large_frames:1; /* accept large frames */ | |
803 | int drv_flags; | |
804 | u16 status_enable; | |
805 | u16 intr_enable; | |
806 | u16 available_media; /* From Wn3_Options. */ | |
807 | u16 capabilities, info1, info2; /* Various, from EEPROM. */ | |
808 | u16 advertising; /* NWay media advertisement */ | |
809 | unsigned char phys[2]; /* MII device addresses. */ | |
810 | u16 deferred; /* Resend these interrupts when we | |
811 | * bale from the ISR */ | |
812 | u16 io_size; /* Size of PCI region (for release_region) */ | |
813 | spinlock_t lock; /* Serialise access to device & its vortex_private */ | |
814 | struct mii_if_info mii; /* MII lib hooks/info */ | |
815 | }; | |
816 | ||
817 | #ifdef CONFIG_PCI | |
818 | #define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL) | |
819 | #else | |
820 | #define DEVICE_PCI(dev) NULL | |
821 | #endif | |
822 | ||
823 | #define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL) | |
824 | ||
825 | #ifdef CONFIG_EISA | |
826 | #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) | |
827 | #else | |
828 | #define DEVICE_EISA(dev) NULL | |
829 | #endif | |
830 | ||
831 | #define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL) | |
832 | ||
833 | /* The action to take with a media selection timer tick. | |
834 | Note that we deviate from the 3Com order by checking 10base2 before AUI. | |
835 | */ | |
836 | enum xcvr_types { | |
837 | XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx, | |
838 | XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10, | |
839 | }; | |
840 | ||
841 | static struct media_table { | |
842 | char *name; | |
843 | unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */ | |
844 | mask:8, /* The transceiver-present bit in Wn3_Config.*/ | |
845 | next:8; /* The media type to try next. */ | |
846 | int wait; /* Time before we check media status. */ | |
847 | } media_tbl[] = { | |
848 | { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10}, | |
849 | { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10}, | |
850 | { "undefined", 0, 0x80, XCVR_10baseT, 10000}, | |
851 | { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10}, | |
852 | { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10}, | |
853 | { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10}, | |
854 | { "MII", 0, 0x41, XCVR_10baseT, 3*HZ }, | |
855 | { "undefined", 0, 0x01, XCVR_10baseT, 10000}, | |
856 | { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ}, | |
857 | { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ }, | |
858 | { "Default", 0, 0xFF, XCVR_10baseT, 10000}, | |
859 | }; | |
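/*
 * Illustrative sketch only (not built): how the media selection timer
 * steps through media_tbl[] above when the current transceiver shows
 * no link, skipping media the card does not have.  The real logic
 * (with locking, MII and NWAY handling) lives in vortex_timer();
 * `if_port' is the XCVR_* index of the medium currently in use and
 * `available_media' comes from Wn3_Options.
 */
#if 0
static int example_next_media(int if_port, int available_media)
{
	do {
		if_port = media_tbl[if_port].next;
	} while (!(available_media & media_tbl[if_port].mask));
	return if_port;		/* recheck after media_tbl[if_port].wait ticks */
}
#endif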
860 | ||
861 | static struct { | |
862 | const char str[ETH_GSTRING_LEN]; | |
863 | } ethtool_stats_keys[] = { | |
864 | { "tx_deferred" }, | |
865 | { "tx_multiple_collisions" }, | |
866 | { "rx_bad_ssd" }, | |
867 | }; | |
868 | ||
869 | /* number of ETHTOOL_GSTATS u64's */ | |
870 | #define VORTEX_NUM_STATS 3 | |
871 | ||
872 | static int vortex_probe1(struct device *gendev, long ioaddr, int irq, | |
873 | int chip_idx, int card_idx); | |
874 | static void vortex_up(struct net_device *dev); | |
875 | static void vortex_down(struct net_device *dev, int final); | |
876 | static int vortex_open(struct net_device *dev); | |
877 | static void mdio_sync(long ioaddr, int bits); | |
878 | static int mdio_read(struct net_device *dev, int phy_id, int location); | |
879 | static void mdio_write(struct net_device *vp, int phy_id, int location, int value); | |
880 | static void vortex_timer(unsigned long arg); | |
881 | static void rx_oom_timer(unsigned long arg); | |
882 | static int vortex_start_xmit(struct sk_buff *skb, struct net_device *dev); | |
883 | static int boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev); | |
884 | static int vortex_rx(struct net_device *dev); | |
885 | static int boomerang_rx(struct net_device *dev); | |
886 | static irqreturn_t vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs); | |
887 | static irqreturn_t boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs); | |
888 | static int vortex_close(struct net_device *dev); | |
889 | static void dump_tx_ring(struct net_device *dev); | |
890 | static void update_stats(long ioaddr, struct net_device *dev); | |
891 | static struct net_device_stats *vortex_get_stats(struct net_device *dev); | |
892 | static void set_rx_mode(struct net_device *dev); | |
893 | #ifdef CONFIG_PCI | |
894 | static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | |
895 | #endif | |
896 | static void vortex_tx_timeout(struct net_device *dev); | |
897 | static void acpi_set_WOL(struct net_device *dev); | |
898 | static struct ethtool_ops vortex_ethtool_ops; | |
899 | static void set_8021q_mode(struct net_device *dev, int enable); | |
900 | ||
901 | \f | |
902 | /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ | |
903 | /* Option count limit only -- unlimited interfaces are supported. */ | |
904 | #define MAX_UNITS 8 | |
905 | static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,}; | |
906 | static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |
907 | static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |
908 | static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |
909 | static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |
910 | static int global_options = -1; | |
911 | static int global_full_duplex = -1; | |
912 | static int global_enable_wol = -1; | |
913 | ||
914 | /* #define dev_alloc_skb dev_alloc_skb_debug */ | |
915 | ||
916 | /* Variables to work-around the Compaq PCI BIOS32 problem. */ | |
917 | static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; | |
918 | static struct net_device *compaq_net_device; | |
919 | ||
920 | static int vortex_cards_found; | |
921 | ||
922 | module_param(debug, int, 0); | |
923 | module_param(global_options, int, 0); | |
924 | module_param_array(options, int, NULL, 0); | |
925 | module_param(global_full_duplex, int, 0); | |
926 | module_param_array(full_duplex, int, NULL, 0); | |
927 | module_param_array(hw_checksums, int, NULL, 0); | |
928 | module_param_array(flow_ctrl, int, NULL, 0); | |
929 | module_param(global_enable_wol, int, 0); | |
930 | module_param_array(enable_wol, int, NULL, 0); | |
931 | module_param(rx_copybreak, int, 0); | |
932 | module_param(max_interrupt_work, int, 0); | |
933 | module_param(compaq_ioaddr, int, 0); | |
934 | module_param(compaq_irq, int, 0); | |
935 | module_param(compaq_device_id, int, 0); | |
936 | module_param(watchdog, int, 0); | |
937 | MODULE_PARM_DESC(debug, "3c59x debug level (0-6)"); | |
938 | MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex"); | |
939 | MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset"); | |
940 | MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)"); | |
941 | MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if options is unset"); | |
942 | MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)"); | |
943 | MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)"); | |
944 | MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)"); | |
945 | MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if options is unset"); | |
946 | MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames"); | |
947 | MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt"); | |
948 | MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)"); | |
949 | MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)"); | |
950 | MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)"); | |
951 | MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds"); | |
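/*
 * Example module usage (illustrative): force the first NIC to
 * 100baseTx full duplex with Wake-on-LAN enabled:
 *
 *	modprobe 3c59x options=4 full_duplex=1 enable_wol=1
 *
 * Bits 0-3 of `options' select the media type using the enum
 * xcvr_types values above (0 = 10baseT, 4 = 100baseTx, 8 = NWAY
 * autonegotiation); see Documentation/networking/vortex.txt for the
 * full list.
 */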
952 | ||
953 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
954 | static void poll_vortex(struct net_device *dev) | |
955 | { | |
956 | struct vortex_private *vp = netdev_priv(dev); | |
957 | unsigned long flags; | |
958 | local_save_flags(flags); | |
959 | local_irq_disable(); | |
960 | (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev,NULL); | |
961 | local_irq_restore(flags); | |
962 | } | |
963 | #endif | |
964 | ||
965 | #ifdef CONFIG_PM | |
966 | ||
967 | static int vortex_suspend (struct pci_dev *pdev, pm_message_t state) | |
968 | { | |
969 | struct net_device *dev = pci_get_drvdata(pdev); | |
970 | ||
971 | if (dev && dev->priv) { | |
972 | if (netif_running(dev)) { | |
973 | netif_device_detach(dev); | |
974 | vortex_down(dev, 1); | |
975 | } | |
976 | } | |
977 | return 0; | |
978 | } | |
979 | ||
980 | static int vortex_resume (struct pci_dev *pdev) | |
981 | { | |
982 | struct net_device *dev = pci_get_drvdata(pdev); | |
983 | ||
984 | if (dev && dev->priv) { | |
985 | if (netif_running(dev)) { | |
986 | vortex_up(dev); | |
987 | netif_device_attach(dev); | |
988 | } | |
989 | } | |
990 | return 0; | |
991 | } | |
992 | ||
993 | #endif /* CONFIG_PM */ | |
994 | ||
995 | #ifdef CONFIG_EISA | |
996 | static struct eisa_device_id vortex_eisa_ids[] = { | |
997 | { "TCM5920", CH_3C592 }, | |
998 | { "TCM5970", CH_3C597 }, | |
999 | { "" } | |
1000 | }; | |
1001 | ||
1002 | static int vortex_eisa_probe (struct device *device); | |
1003 | static int vortex_eisa_remove (struct device *device); | |
1004 | ||
1005 | static struct eisa_driver vortex_eisa_driver = { | |
1006 | .id_table = vortex_eisa_ids, | |
1007 | .driver = { | |
1008 | .name = "3c59x", | |
1009 | .probe = vortex_eisa_probe, | |
1010 | .remove = vortex_eisa_remove | |
1011 | } | |
1012 | }; | |
1013 | ||
1014 | static int vortex_eisa_probe (struct device *device) | |
1015 | { | |
1016 | long ioaddr; | |
1017 | struct eisa_device *edev; | |
1018 | ||
1019 | edev = to_eisa_device (device); | |
1020 | ioaddr = edev->base_addr; | |
1021 | ||
1022 | if (!request_region(ioaddr, VORTEX_TOTAL_SIZE, DRV_NAME)) | |
1023 | return -EBUSY; | |
1024 | ||
1025 | if (vortex_probe1(device, ioaddr, inw(ioaddr + 0xC88) >> 12, | |
1026 | edev->id.driver_data, vortex_cards_found)) { | |
1027 | release_region (ioaddr, VORTEX_TOTAL_SIZE); | |
1028 | return -ENODEV; | |
1029 | } | |
1030 | ||
1031 | vortex_cards_found++; | |
1032 | ||
1033 | return 0; | |
1034 | } | |
1035 | ||
1036 | static int vortex_eisa_remove (struct device *device) | |
1037 | { | |
1038 | struct eisa_device *edev; | |
1039 | struct net_device *dev; | |
1040 | struct vortex_private *vp; | |
1041 | long ioaddr; | |
1042 | ||
1043 | edev = to_eisa_device (device); | |
1044 | dev = eisa_get_drvdata (edev); | |
1045 | ||
1046 | if (!dev) { | |
1047 | printk("vortex_eisa_remove called for Compaq device!\n"); | |
1048 | BUG(); | |
1049 | } | |
1050 | ||
1051 | vp = netdev_priv(dev); | |
1052 | ioaddr = dev->base_addr; | |
1053 | ||
1054 | unregister_netdev (dev); | |
1055 | outw (TotalReset|0x14, ioaddr + EL3_CMD); | |
1056 | release_region (ioaddr, VORTEX_TOTAL_SIZE); | |
1057 | ||
1058 | free_netdev (dev); | |
1059 | return 0; | |
1060 | } | |
1061 | #endif | |
1062 | ||
1063 | /* returns count found (>= 0), or negative on error */ | |
1064 | static int __init vortex_eisa_init (void) | |
1065 | { | |
1066 | int eisa_found = 0; | |
1067 | int orig_cards_found = vortex_cards_found; | |
1068 | ||
1069 | #ifdef CONFIG_EISA | |
1070 | if (eisa_driver_register (&vortex_eisa_driver) >= 0) { | |
1071 | /* Because of the way EISA bus is probed, we cannot assume | |
1072 | * any devices have been found when we exit from |
1073 | * eisa_driver_register (the bus root driver may not be | |
1074 | * initialized yet). So we blindly assume something was | |
1075 | * found, and let the sysfs magic happen... */ |
1076 | ||
1077 | eisa_found = 1; | |
1078 | } | |
1079 | #endif | |
1080 | ||
1081 | /* Special code to work-around the Compaq PCI BIOS32 problem. */ | |
1082 | if (compaq_ioaddr) { | |
1083 | vortex_probe1(NULL, compaq_ioaddr, compaq_irq, | |
1084 | compaq_device_id, vortex_cards_found++); | |
1085 | } | |
1086 | ||
1087 | return vortex_cards_found - orig_cards_found + eisa_found; | |
1088 | } | |
1089 | ||
1090 | /* returns count (>= 0), or negative on error */ | |
1091 | static int __devinit vortex_init_one (struct pci_dev *pdev, | |
1092 | const struct pci_device_id *ent) | |
1093 | { | |
1094 | int rc; | |
1095 | ||
1096 | /* wake up and enable device */ | |
1097 | rc = pci_enable_device (pdev); | |
1098 | if (rc < 0) | |
1099 | goto out; | |
1100 | ||
1101 | rc = vortex_probe1 (&pdev->dev, pci_resource_start (pdev, 0), | |
1102 | pdev->irq, ent->driver_data, vortex_cards_found); | |
1103 | if (rc < 0) { | |
1104 | pci_disable_device (pdev); | |
1105 | goto out; | |
1106 | } | |
1107 | ||
1108 | vortex_cards_found++; | |
1109 | ||
1110 | out: | |
1111 | return rc; | |
1112 | } | |
1113 | ||
1114 | /* | |
1115 | * Start up the PCI/EISA device which is described by *gendev. | |
1116 | * Return 0 on success. | |
1117 | * | |
1118 | * NOTE: pdev can be NULL, for the case of a Compaq device | |
1119 | */ | |
1120 | static int __devinit vortex_probe1(struct device *gendev, | |
1121 | long ioaddr, int irq, | |
1122 | int chip_idx, int card_idx) | |
1123 | { | |
1124 | struct vortex_private *vp; | |
1125 | int option; | |
1126 | unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ | |
1127 | int i, step; | |
1128 | struct net_device *dev; | |
1129 | static int printed_version; | |
1130 | int retval, print_info; | |
1131 | struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx]; | |
1132 | char *print_name = "3c59x"; | |
1133 | struct pci_dev *pdev = NULL; | |
1134 | struct eisa_device *edev = NULL; | |
1135 | ||
1136 | if (!printed_version) { | |
1137 | printk (version); | |
1138 | printed_version = 1; | |
1139 | } | |
1140 | ||
1141 | if (gendev) { | |
1142 | if ((pdev = DEVICE_PCI(gendev))) { | |
1143 | print_name = pci_name(pdev); | |
1144 | } | |
1145 | ||
1146 | if ((edev = DEVICE_EISA(gendev))) { | |
1147 | print_name = edev->dev.bus_id; | |
1148 | } | |
1149 | } | |
1150 | ||
1151 | dev = alloc_etherdev(sizeof(*vp)); | |
1152 | retval = -ENOMEM; | |
1153 | if (!dev) { | |
1154 | printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n"); | |
1155 | goto out; | |
1156 | } | |
1157 | SET_MODULE_OWNER(dev); | |
1158 | SET_NETDEV_DEV(dev, gendev); | |
1159 | vp = netdev_priv(dev); | |
1160 | ||
1161 | option = global_options; | |
1162 | ||
1163 | /* The lower four bits are the media type. */ | |
1164 | if (dev->mem_start) { | |
1165 | /* | |
1166 | * The 'options' param is passed in as the third arg to the | |
1167 | * LILO 'ether=' argument for non-modular use | |
1168 | */ | |
1169 | option = dev->mem_start; | |
1170 | } | |
1171 | else if (card_idx < MAX_UNITS) { | |
1172 | if (options[card_idx] >= 0) | |
1173 | option = options[card_idx]; | |
1174 | } | |
1175 | ||
1176 | if (option > 0) { | |
1177 | if (option & 0x8000) | |
1178 | vortex_debug = 7; | |
1179 | if (option & 0x4000) | |
1180 | vortex_debug = 2; | |
1181 | if (option & 0x0400) | |
1182 | vp->enable_wol = 1; | |
1183 | } | |
1184 | ||
1185 | print_info = (vortex_debug > 1); | |
1186 | if (print_info) | |
1187 | printk (KERN_INFO "See Documentation/networking/vortex.txt\n"); | |
1188 | ||
1189 | printk(KERN_INFO "%s: 3Com %s %s at 0x%lx. Vers " DRV_VERSION "\n", | |
1190 | print_name, | |
1191 | pdev ? "PCI" : "EISA", | |
1192 | vci->name, | |
1193 | ioaddr); | |
1194 | ||
1195 | dev->base_addr = ioaddr; | |
1196 | dev->irq = irq; | |
1197 | dev->mtu = mtu; | |
1198 | vp->large_frames = mtu > 1500; | |
1199 | vp->drv_flags = vci->drv_flags; | |
1200 | vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0; | |
1201 | vp->io_size = vci->io_size; | |
1202 | vp->card_idx = card_idx; | |
1203 | ||
1204 | /* module list only for Compaq device */ | |
1205 | if (gendev == NULL) { | |
1206 | compaq_net_device = dev; | |
1207 | } | |
1208 | ||
1209 | /* PCI-only startup logic */ | |
1210 | if (pdev) { | |
1211 | /* EISA resources already marked, so only PCI needs to do this here */ | |
1212 | /* Ignore return value, because Cardbus drivers already allocate for us */ | |
1213 | if (request_region(ioaddr, vci->io_size, print_name) != NULL) | |
1214 | vp->must_free_region = 1; | |
1215 | ||
1216 | /* enable bus-mastering if necessary */ | |
1217 | if (vci->flags & PCI_USES_MASTER) | |
1218 | pci_set_master (pdev); | |
1219 | ||
1220 | if (vci->drv_flags & IS_VORTEX) { | |
1221 | u8 pci_latency; | |
1222 | u8 new_latency = 248; | |
1223 | ||
1224 | /* Check the PCI latency value. On the 3c590 series the latency timer | |
1225 | must be set to the maximum value to avoid data corruption that occurs | |
1226 | when the timer expires during a transfer. This bug exists in the Vortex |
1227 | chip only. */ |
1228 | pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); | |
1229 | if (pci_latency < new_latency) { | |
1230 | printk(KERN_INFO "%s: Overriding PCI latency" | |
1231 | " timer (CFLT) setting of %d, new value is %d.\n", | |
1232 | print_name, pci_latency, new_latency); | |
1233 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency); | |
1234 | } | |
1235 | } | |
1236 | } | |
1237 | ||
1238 | spin_lock_init(&vp->lock); | |
1239 | vp->gendev = gendev; | |
1240 | vp->mii.dev = dev; | |
1241 | vp->mii.mdio_read = mdio_read; | |
1242 | vp->mii.mdio_write = mdio_write; | |
1243 | vp->mii.phy_id_mask = 0x1f; | |
1244 | vp->mii.reg_num_mask = 0x1f; | |
1245 | ||
1246 | /* Makes sure rings are at least 16 byte aligned. */ | |
1247 | vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE | |
1248 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, | |
1249 | &vp->rx_ring_dma); | |
1250 | retval = -ENOMEM; | |
1251 | if (vp->rx_ring == 0) | |
1252 | goto free_region; | |
1253 | ||
1254 | vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); | |
1255 | vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; | |
1256 | ||
1257 | /* if we are a PCI driver, we store info in pdev->driver_data | |
1258 | * instead of a module list */ | |
1259 | if (pdev) | |
1260 | pci_set_drvdata(pdev, dev); | |
1261 | if (edev) | |
1262 | eisa_set_drvdata (edev, dev); | |
1263 | ||
1264 | vp->media_override = 7; | |
1265 | if (option >= 0) { | |
1266 | vp->media_override = ((option & 7) == 2) ? 0 : option & 15; | |
1267 | if (vp->media_override != 7) | |
1268 | vp->medialock = 1; | |
1269 | vp->full_duplex = (option & 0x200) ? 1 : 0; | |
1270 | vp->bus_master = (option & 16) ? 1 : 0; | |
1271 | } | |
1272 | ||
1273 | if (global_full_duplex > 0) | |
1274 | vp->full_duplex = 1; | |
1275 | if (global_enable_wol > 0) | |
1276 | vp->enable_wol = 1; | |
1277 | ||
1278 | if (card_idx < MAX_UNITS) { | |
1279 | if (full_duplex[card_idx] > 0) | |
1280 | vp->full_duplex = 1; | |
1281 | if (flow_ctrl[card_idx] > 0) | |
1282 | vp->flow_ctrl = 1; | |
1283 | if (enable_wol[card_idx] > 0) | |
1284 | vp->enable_wol = 1; | |
1285 | } | |
1286 | ||
1287 | vp->force_fd = vp->full_duplex; | |
1288 | vp->options = option; | |
1289 | /* Read the station address from the EEPROM. */ | |
1290 | EL3WINDOW(0); | |
1291 | { | |
1292 | int base; | |
1293 | ||
1294 | if (vci->drv_flags & EEPROM_8BIT) | |
1295 | base = 0x230; | |
1296 | else if (vci->drv_flags & EEPROM_OFFSET) | |
1297 | base = EEPROM_Read + 0x30; | |
1298 | else | |
1299 | base = EEPROM_Read; | |
1300 | ||
1301 | for (i = 0; i < 0x40; i++) { | |
1302 | int timer; | |
1303 | outw(base + i, ioaddr + Wn0EepromCmd); | |
1304 | /* Pause for at least 162 us for the read to take place. */ | |
1305 | for (timer = 10; timer >= 0; timer--) { | |
1306 | udelay(162); | |
1307 | if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0) | |
1308 | break; | |
1309 | } | |
1310 | eeprom[i] = inw(ioaddr + Wn0EepromData); | |
1311 | } | |
1312 | } | |
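| /* Verify the EEPROM checksum: XOR the first 0x18 words (extended to 0x21 words on newer cards), fold to a byte, and expect zero. */ | |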
1313 | for (i = 0; i < 0x18; i++) | |
1314 | checksum ^= eeprom[i]; | |
1315 | checksum = (checksum ^ (checksum >> 8)) & 0xff; | |
1316 | if (checksum != 0x00) { /* Grrr, needless incompatible change, 3Com. */ | |
1317 | while (i < 0x21) | |
1318 | checksum ^= eeprom[i++]; | |
1319 | checksum = (checksum ^ (checksum >> 8)) & 0xff; | |
1320 | } | |
1321 | if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO)) | |
1322 | printk(" ***INVALID CHECKSUM %4.4x*** ", checksum); | |
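| /* The station address comes from EEPROM words 10-12, byte-swapped into network order. */ | |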
1323 | for (i = 0; i < 3; i++) | |
1324 | ((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); | |
1325 | if (print_info) { | |
1326 | for (i = 0; i < 6; i++) | |
1327 | printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]); | |
1328 | } | |
1329 | /* Unfortunately, an all-zero EEPROM passes the checksum and this | |
1330 | gets found in the wild in failure cases. Crypto is hard 8) */ | |
1331 | if (!is_valid_ether_addr(dev->dev_addr)) { | |
1332 | retval = -EINVAL; | |
1333 | printk(KERN_ERR "*** EEPROM MAC address is invalid.\n"); | |
1334 | goto free_ring; /* With every pack */ | |
1335 | } | |
1336 | EL3WINDOW(2); | |
1337 | for (i = 0; i < 6; i++) | |
1338 | outb(dev->dev_addr[i], ioaddr + i); | |
1339 | ||
1340 | #ifdef __sparc__ | |
1341 | if (print_info) | |
1342 | printk(", IRQ %s\n", __irq_itoa(dev->irq)); | |
1343 | #else | |
1344 | if (print_info) | |
1345 | printk(", IRQ %d\n", dev->irq); | |
1346 | /* Tell them about an invalid IRQ. */ | |
1347 | if (dev->irq <= 0 || dev->irq >= NR_IRQS) | |
1348 | printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n", | |
1349 | dev->irq); | |
1350 | #endif | |
1351 | ||
1352 | EL3WINDOW(4); | |
1353 | step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1; | |
1354 | if (print_info) { | |
1355 | printk(KERN_INFO " product code %02x%02x rev %02x.%d date %02d-" | |
1356 | "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], | |
1357 | step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9); | |
1358 | } | |
1359 | ||
1360 | ||
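| /* CardBus cards: map the function status window (PCI resource 2) and set the LED/MII power polarity bits in Wn2_ResetOptions. */ | |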
1361 | if (pdev && vci->drv_flags & HAS_CB_FNS) { | |
1362 | unsigned long fn_st_addr; /* Cardbus function status space */ | |
1363 | unsigned short n; | |
1364 | ||
1365 | fn_st_addr = pci_resource_start (pdev, 2); | |
1366 | if (fn_st_addr) { | |
1367 | vp->cb_fn_base = ioremap(fn_st_addr, 128); | |
1368 | retval = -ENOMEM; | |
1369 | if (!vp->cb_fn_base) | |
1370 | goto free_ring; | |
1371 | } | |
1372 | if (print_info) { | |
1373 | printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n", | |
1374 | print_name, fn_st_addr, vp->cb_fn_base); | |
1375 | } | |
1376 | EL3WINDOW(2); | |
1377 | ||
1378 | n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010; | |
1379 | if (vp->drv_flags & INVERT_LED_PWR) | |
1380 | n |= 0x10; | |
1381 | if (vp->drv_flags & INVERT_MII_PWR) | |
1382 | n |= 0x4000; | |
1383 | outw(n, ioaddr + Wn2_ResetOptions); | |
1384 | if (vp->drv_flags & WNO_XCVR_PWR) { | |
1385 | EL3WINDOW(0); | |
1386 | outw(0x0800, ioaddr); | |
1387 | } | |
1388 | } | |
1389 | ||
1390 | /* Extract our information from the EEPROM data. */ | |
1391 | vp->info1 = eeprom[13]; | |
1392 | vp->info2 = eeprom[15]; | |
1393 | vp->capabilities = eeprom[16]; | |
1394 | ||
1395 | if (vp->info1 & 0x8000) { | |
1396 | vp->full_duplex = 1; | |
1397 | if (print_info) | |
1398 | printk(KERN_INFO "Full duplex capable\n"); | |
1399 | } | |
1400 | ||
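| /* Window 3: read the available transceivers (Wn3_Options) and decode InternalConfig for RAM size/width/split, autoselect and the default transceiver. */ | |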
1401 | { | |
1402 | static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; | |
1403 | unsigned int config; | |
1404 | EL3WINDOW(3); | |
1405 | vp->available_media = inw(ioaddr + Wn3_Options); | |
1406 | if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */ | |
1407 | vp->available_media = 0x40; | |
1408 | config = inl(ioaddr + Wn3_Config); | |
1409 | if (print_info) { | |
1410 | printk(KERN_DEBUG " Internal config register is %4.4x, " | |
1411 | "transceivers %#x.\n", config, inw(ioaddr + Wn3_Options)); | |
1412 | printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", | |
1413 | 8 << RAM_SIZE(config), | |
1414 | RAM_WIDTH(config) ? "word" : "byte", | |
1415 | ram_split[RAM_SPLIT(config)], | |
1416 | AUTOSELECT(config) ? "autoselect/" : "", | |
1417 | XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" : | |
1418 | media_tbl[XCVR(config)].name); | |
1419 | } | |
1420 | vp->default_media = XCVR(config); | |
1421 | if (vp->default_media == XCVR_NWAY) | |
1422 | vp->has_nway = 1; | |
1423 | vp->autoselect = AUTOSELECT(config); | |
1424 | } | |
1425 | ||
1426 | if (vp->media_override != 7) { | |
1427 | printk(KERN_INFO "%s: Media override to transceiver type %d (%s).\n", | |
1428 | print_name, vp->media_override, | |
1429 | media_tbl[vp->media_override].name); | |
1430 | dev->if_port = vp->media_override; | |
1431 | } else | |
1432 | dev->if_port = vp->default_media; | |
1433 | ||
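| /* Look for an MII PHY when the card has an MII or NWAY transceiver available. */ | |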
1434 | if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) || | |
1435 | dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { | |
1436 | int phy, phy_idx = 0; | |
1437 | EL3WINDOW(4); | |
1438 | mii_preamble_required++; | |
1439 | if (vp->drv_flags & EXTRA_PREAMBLE) | |
1440 | mii_preamble_required++; | |
1441 | mdio_sync(ioaddr, 32); | |
1442 | mdio_read(dev, 24, 1); | |
1443 | for (phy = 0; phy < 32 && phy_idx < 1; phy++) { | |
1444 | int mii_status, phyx; | |
1445 | ||
1446 | /* | |
1447 | * For the 3c905CX we look at index 24 first, because it bogusly | |
1448 | * reports an external PHY at all indices. | |
1449 | */ | |
1450 | if (phy == 0) | |
1451 | phyx = 24; | |
1452 | else if (phy <= 24) | |
1453 | phyx = phy - 1; | |
1454 | else | |
1455 | phyx = phy; | |
1456 | mii_status = mdio_read(dev, phyx, 1); | |
1457 | if (mii_status && mii_status != 0xffff) { | |
1458 | vp->phys[phy_idx++] = phyx; | |
1459 | if (print_info) { | |
1460 | printk(KERN_INFO " MII transceiver found at address %d," | |
1461 | " status %4x.\n", phyx, mii_status); | |
1462 | } | |
1463 | if ((mii_status & 0x0040) == 0) | |
1464 | mii_preamble_required++; | |
1465 | } | |
1466 | } | |
1467 | mii_preamble_required--; | |
1468 | if (phy_idx == 0) { | |
1469 | printk(KERN_WARNING" ***WARNING*** No MII transceivers found!\n"); | |
1470 | vp->phys[0] = 24; | |
1471 | } else { | |
1472 | vp->advertising = mdio_read(dev, vp->phys[0], 4); | |
1473 | if (vp->full_duplex) { | |
1474 | /* Only advertise the FD media types. */ | |
1475 | vp->advertising &= ~0x02A0; | |
1476 | mdio_write(dev, vp->phys[0], 4, vp->advertising); | |
1477 | } | |
1478 | } | |
1479 | vp->mii.phy_id = vp->phys[0]; | |
1480 | } | |
1481 | ||
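| /* Boomerang-style descriptor DMA: always full bus-master Tx, with either early or whole-frame Rx depending on info2 bit 0. */ | |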
1482 | if (vp->capabilities & CapBusMaster) { | |
1483 | vp->full_bus_master_tx = 1; | |
1484 | if (print_info) { | |
1485 | printk(KERN_INFO " Enabling bus-master transmits and %s receives.\n", | |
1486 | (vp->info2 & 1) ? "early" : "whole-frame" ); | |
1487 | } | |
1488 | vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2; | |
1489 | vp->bus_master = 0; /* AKPM: vortex only */ | |
1490 | } | |
1491 | ||
1492 | /* The 3c59x-specific entries in the device structure. */ | |
1493 | dev->open = vortex_open; | |
1494 | if (vp->full_bus_master_tx) { | |
1495 | dev->hard_start_xmit = boomerang_start_xmit; | |
1496 | /* Actually, it still should work with iommu. */ | |
1497 | dev->features |= NETIF_F_SG; | |
1498 | if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) || | |
1499 | (hw_checksums[card_idx] == 1)) { | |
1500 | dev->features |= NETIF_F_IP_CSUM; | |
1501 | } | |
1502 | } else { | |
1503 | dev->hard_start_xmit = vortex_start_xmit; | |
1504 | } | |
1505 | ||
1506 | if (print_info) { | |
1507 | printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n", | |
1508 | print_name, | |
1509 | (dev->features & NETIF_F_SG) ? "en":"dis", | |
1510 | (dev->features & NETIF_F_IP_CSUM) ? "en":"dis"); | |
1511 | } | |
1512 | ||
1513 | dev->stop = vortex_close; | |
1514 | dev->get_stats = vortex_get_stats; | |
1515 | #ifdef CONFIG_PCI | |
1516 | dev->do_ioctl = vortex_ioctl; | |
1517 | #endif | |
1518 | dev->ethtool_ops = &vortex_ethtool_ops; | |
1519 | dev->set_multicast_list = set_rx_mode; | |
1520 | dev->tx_timeout = vortex_tx_timeout; | |
1521 | dev->watchdog_timeo = (watchdog * HZ) / 1000; | |
1522 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1523 | dev->poll_controller = poll_vortex; | |
1524 | #endif | |
1525 | if (pdev) { | |
1526 | vp->pm_state_valid = 1; | |
1527 | pci_save_state(VORTEX_PCI(vp)); | |
1528 | acpi_set_WOL(dev); | |
1529 | } | |
1530 | retval = register_netdev(dev); | |
1531 | if (retval == 0) | |
1532 | return 0; | |
1533 | ||
1534 | free_ring: | |
1535 | pci_free_consistent(pdev, | |
1536 | sizeof(struct boom_rx_desc) * RX_RING_SIZE | |
1537 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, | |
1538 | vp->rx_ring, | |
1539 | vp->rx_ring_dma); | |
1540 | free_region: | |
1541 | if (vp->must_free_region) | |
1542 | release_region(ioaddr, vci->io_size); | |
1543 | free_netdev(dev); | |
1544 | printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval); | |
1545 | out: | |
1546 | return retval; | |
1547 | } | |
1548 | ||
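| /* Issue a command and spin until the chip clears CmdInProgress: a short fast poll first, then 10 us steps for up to one second. */ | |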
1549 | static void | |
1550 | issue_and_wait(struct net_device *dev, int cmd) | |
1551 | { | |
1552 | int i; | |
1553 | ||
1554 | outw(cmd, dev->base_addr + EL3_CMD); | |
1555 | for (i = 0; i < 2000; i++) { | |
1556 | if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) | |
1557 | return; | |
1558 | } | |
1559 | ||
1560 | /* OK, that didn't work. Do it the slow way. One second */ | |
1561 | for (i = 0; i < 100000; i++) { | |
1562 | if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) { | |
1563 | if (vortex_debug > 1) | |
1564 | printk(KERN_INFO "%s: command 0x%04x took %d usecs\n", | |
1565 | dev->name, cmd, i * 10); | |
1566 | return; | |
1567 | } | |
1568 | udelay(10); | |
1569 | } | |
1570 | printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n", | |
1571 | dev->name, cmd, inw(dev->base_addr + EL3_STATUS)); | |
1572 | } | |
1573 | ||
1574 | static void | |
1575 | vortex_up(struct net_device *dev) | |
1576 | { | |
1577 | long ioaddr = dev->base_addr; | |
1578 | struct vortex_private *vp = netdev_priv(dev); | |
1579 | unsigned int config; | |
1580 | int i; | |
1581 | ||
1582 | if (VORTEX_PCI(vp)) { | |
1583 | pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ | |
1584 | pci_restore_state(VORTEX_PCI(vp)); | |
1585 | pci_enable_device(VORTEX_PCI(vp)); | |
1586 | } | |
1587 | ||
1588 | /* Before initializing select the active media port. */ | |
1589 | EL3WINDOW(3); | |
1590 | config = inl(ioaddr + Wn3_Config); | |
1591 | ||
1592 | if (vp->media_override != 7) { | |
1593 | printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n", | |
1594 | dev->name, vp->media_override, | |
1595 | media_tbl[vp->media_override].name); | |
1596 | dev->if_port = vp->media_override; | |
1597 | } else if (vp->autoselect) { | |
1598 | if (vp->has_nway) { | |
1599 | if (vortex_debug > 1) | |
1600 | printk(KERN_INFO "%s: using NWAY device table, not %d\n", | |
1601 | dev->name, dev->if_port); | |
1602 | dev->if_port = XCVR_NWAY; | |
1603 | } else { | |
1604 | /* Find first available media type, starting with 100baseTx. */ | |
1605 | dev->if_port = XCVR_100baseTx; | |
1606 | while (! (vp->available_media & media_tbl[dev->if_port].mask)) | |
1607 | dev->if_port = media_tbl[dev->if_port].next; | |
1608 | if (vortex_debug > 1) | |
1609 | printk(KERN_INFO "%s: first available media type: %s\n", | |
1610 | dev->name, media_tbl[dev->if_port].name); | |
1611 | } | |
1612 | } else { | |
1613 | dev->if_port = vp->default_media; | |
1614 | if (vortex_debug > 1) | |
1615 | printk(KERN_INFO "%s: using default media %s\n", | |
1616 | dev->name, media_tbl[dev->if_port].name); | |
1617 | } | |
1618 | ||
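| /* Arm the media-selection timer; the Rx OOM timer is only armed later, when boomerang_rx() runs out of memory. */ | |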
1619 | init_timer(&vp->timer); | |
1620 | vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); | |
1621 | vp->timer.data = (unsigned long)dev; | |
1622 | vp->timer.function = vortex_timer; /* timer handler */ | |
1623 | add_timer(&vp->timer); | |
1624 | ||
1625 | init_timer(&vp->rx_oom_timer); | |
1626 | vp->rx_oom_timer.data = (unsigned long)dev; | |
1627 | vp->rx_oom_timer.function = rx_oom_timer; | |
1628 | ||
1629 | if (vortex_debug > 1) | |
1630 | printk(KERN_DEBUG "%s: Initial media type %s.\n", | |
1631 | dev->name, media_tbl[dev->if_port].name); | |
1632 | ||
1633 | vp->full_duplex = vp->force_fd; | |
1634 | config = BFINS(config, dev->if_port, 20, 4); | |
1635 | if (vortex_debug > 6) | |
1636 | printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config); | |
1637 | outl(config, ioaddr + Wn3_Config); | |
1638 | ||
1639 | if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { | |
1640 | int mii_reg1, mii_reg5; | |
1641 | EL3WINDOW(4); | |
1642 | /* Read BMSR (reg1) only to clear old status. */ | |
1643 | mii_reg1 = mdio_read(dev, vp->phys[0], 1); | |
1644 | mii_reg5 = mdio_read(dev, vp->phys[0], 5); | |
1645 | if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) { | |
1646 | netif_carrier_off(dev); /* No MII device or no link partner report */ | |
1647 | } else { | |
1648 | mii_reg5 &= vp->advertising; | |
1649 | if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */ | |
1650 | || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */ | |
1651 | vp->full_duplex = 1; | |
1652 | netif_carrier_on(dev); | |
1653 | } | |
1654 | vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); | |
1655 | if (vortex_debug > 1) | |
1656 | printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x," | |
1657 | " info1 %04x, setting %s-duplex.\n", | |
1658 | dev->name, vp->phys[0], | |
1659 | mii_reg1, mii_reg5, | |
1660 | vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half"); | |
1661 | EL3WINDOW(3); | |
1662 | } | |
1663 | ||
1664 | /* Set the full-duplex, large-frames and flow-control bits in Wn3_MAC_Ctrl. */ | |
1665 | outw( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | | |
1666 | (vp->large_frames ? 0x40 : 0) | | |
1667 | ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0), | |
1668 | ioaddr + Wn3_MAC_Ctrl); | |
1669 | ||
1670 | if (vortex_debug > 1) { | |
1671 | printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n", | |
1672 | dev->name, config); | |
1673 | } | |
1674 | ||
1675 | issue_and_wait(dev, TxReset); | |
1676 | /* | |
1677 | * Don't reset the PHY - that upsets autonegotiation during DHCP operations. | |
1678 | */ | |
1679 | issue_and_wait(dev, RxReset|0x04); | |
1680 | ||
1681 | outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); | |
1682 | ||
1683 | if (vortex_debug > 1) { | |
1684 | EL3WINDOW(4); | |
1685 | printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n", | |
1686 | dev->name, dev->irq, inw(ioaddr + Wn4_Media)); | |
1687 | } | |
1688 | ||
1689 | /* Set the station address and mask in window 2 each time opened. */ | |
1690 | EL3WINDOW(2); | |
1691 | for (i = 0; i < 6; i++) | |
1692 | outb(dev->dev_addr[i], ioaddr + i); | |
1693 | for (; i < 12; i+=2) | |
1694 | outw(0, ioaddr + i); | |
1695 | ||
1696 | if (vp->cb_fn_base) { | |
1697 | unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010; | |
1698 | if (vp->drv_flags & INVERT_LED_PWR) | |
1699 | n |= 0x10; | |
1700 | if (vp->drv_flags & INVERT_MII_PWR) | |
1701 | n |= 0x4000; | |
1702 | outw(n, ioaddr + Wn2_ResetOptions); | |
1703 | } | |
1704 | ||
1705 | if (dev->if_port == XCVR_10base2) | |
1706 | /* Start the thinnet transceiver. We should really wait 50ms... */ | |
1707 | outw(StartCoax, ioaddr + EL3_CMD); | |
1708 | if (dev->if_port != XCVR_NWAY) { | |
1709 | EL3WINDOW(4); | |
1710 | outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) | | |
1711 | media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); | |
1712 | } | |
1713 | ||
1714 | /* Switch to the stats window, and clear all stats by reading. */ | |
1715 | outw(StatsDisable, ioaddr + EL3_CMD); | |
1716 | EL3WINDOW(6); | |
1717 | for (i = 0; i < 10; i++) | |
1718 | inb(ioaddr + i); | |
1719 | inw(ioaddr + 10); | |
1720 | inw(ioaddr + 12); | |
1721 | /* New: On the Vortex we must also clear the BadSSD counter. */ | |
1722 | EL3WINDOW(4); | |
1723 | inb(ioaddr + 12); | |
1724 | /* ..and on the Boomerang we enable the extra statistics bits. */ | |
1725 | outw(0x0040, ioaddr + Wn4_NetDiag); | |
1726 | ||
1727 | /* Switch to register set 7 for normal use. */ | |
1728 | EL3WINDOW(7); | |
1729 | ||
1730 | if (vp->full_bus_master_rx) { /* Boomerang bus master. */ | |
1731 | vp->cur_rx = vp->dirty_rx = 0; | |
1732 | /* Initialize the RxEarly register as recommended. */ | |
1733 | outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); | |
1734 | outl(0x0020, ioaddr + PktStatus); | |
1735 | outl(vp->rx_ring_dma, ioaddr + UpListPtr); | |
1736 | } | |
1737 | if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ | |
1738 | vp->cur_tx = vp->dirty_tx = 0; | |
1739 | if (vp->drv_flags & IS_BOOMERANG) | |
1740 | outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */ | |
1741 | /* Clear the Rx, Tx rings. */ | |
1742 | for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */ | |
1743 | vp->rx_ring[i].status = 0; | |
1744 | for (i = 0; i < TX_RING_SIZE; i++) | |
1745 | vp->tx_skbuff[i] = NULL; | |
1746 | outl(0, ioaddr + DownListPtr); | |
1747 | } | |
1748 | /* Set receiver mode: presumably accept broadcast and phys addr only. */ | |
1749 | set_rx_mode(dev); | |
1750 | /* enable 802.1q tagged frames */ | |
1751 | set_8021q_mode(dev, 1); | |
1752 | outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ | |
1753 | ||
1754 | // issue_and_wait(dev, SetTxStart|0x07ff); | |
1755 | outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ | |
1756 | outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ | |
1757 | /* Allow status bits to be seen. */ | |
1758 | vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete| | |
1759 | (vp->full_bus_master_tx ? DownComplete : TxAvailable) | | |
1760 | (vp->full_bus_master_rx ? UpComplete : RxComplete) | | |
1761 | (vp->bus_master ? DMADone : 0); | |
1762 | vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | | |
1763 | (vp->full_bus_master_rx ? 0 : RxComplete) | | |
1764 | StatsFull | HostError | TxComplete | IntReq | |
1765 | | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete; | |
1766 | outw(vp->status_enable, ioaddr + EL3_CMD); | |
1767 | /* Ack all pending events, and set active indicator mask. */ | |
1768 | outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, | |
1769 | ioaddr + EL3_CMD); | |
1770 | outw(vp->intr_enable, ioaddr + EL3_CMD); | |
1771 | if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ | |
1772 | writel(0x8000, vp->cb_fn_base + 4); | |
1773 | netif_start_queue (dev); | |
1774 | } | |
1775 | ||
1776 | static int | |
1777 | vortex_open(struct net_device *dev) | |
1778 | { | |
1779 | struct vortex_private *vp = netdev_priv(dev); | |
1780 | int i; | |
1781 | int retval; | |
1782 | ||
1783 | /* Use the now-standard shared IRQ implementation. */ | |
1784 | if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? | |
1785 | &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev))) { | |
1786 | printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq); | |
1787 | goto out; | |
1788 | } | |
1789 | ||
1790 | if (vp->full_bus_master_rx) { /* Boomerang bus master. */ | |
1791 | if (vortex_debug > 2) | |
1792 | printk(KERN_DEBUG "%s: Filling in the Rx ring.\n", dev->name); | |
1793 | for (i = 0; i < RX_RING_SIZE; i++) { | |
1794 | struct sk_buff *skb; | |
1795 | vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); | |
1796 | vp->rx_ring[i].status = 0; /* Clear complete bit. */ | |
1797 | vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); | |
1798 | skb = dev_alloc_skb(PKT_BUF_SZ); | |
1799 | vp->rx_skbuff[i] = skb; | |
1800 | if (skb == NULL) | |
1801 | break; /* Bad news! */ | |
1802 | skb->dev = dev; /* Mark as being used by this device. */ | |
1803 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | |
1804 | vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); | |
1805 | } | |
1806 | if (i != RX_RING_SIZE) { | |
1807 | int j; | |
1808 | printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name); | |
1809 | for (j = 0; j < i; j++) { | |
1810 | if (vp->rx_skbuff[j]) { | |
1811 | dev_kfree_skb(vp->rx_skbuff[j]); | |
1812 | vp->rx_skbuff[j] = NULL; | |
1813 | } | |
1814 | } | |
1815 | retval = -ENOMEM; | |
1816 | goto out_free_irq; | |
1817 | } | |
1818 | /* Wrap the ring. */ | |
1819 | vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); | |
1820 | } | |
1821 | ||
1822 | vortex_up(dev); | |
1823 | return 0; | |
1824 | ||
1825 | out_free_irq: | |
1826 | free_irq(dev->irq, dev); | |
1827 | out: | |
1828 | if (vortex_debug > 1) | |
1829 | printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval); | |
1830 | return retval; | |
1831 | } | |
1832 | ||
1833 | static void | |
1834 | vortex_timer(unsigned long data) | |
1835 | { | |
1836 | struct net_device *dev = (struct net_device *)data; | |
1837 | struct vortex_private *vp = netdev_priv(dev); | |
1838 | long ioaddr = dev->base_addr; | |
1839 | int next_tick = 60*HZ; | |
1840 | int ok = 0; | |
1841 | int media_status, mii_status, old_window; | |
1842 | ||
1843 | if (vortex_debug > 2) { | |
1844 | printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n", | |
1845 | dev->name, media_tbl[dev->if_port].name); | |
1846 | printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); | |
1847 | } | |
1848 | ||
1849 | if (vp->medialock) | |
1850 | goto leave_media_alone; | |
1851 | disable_irq(dev->irq); | |
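| /* Remember the currently selected register window (top three bits of the status register) so it can be restored below. */ | |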
1852 | old_window = inw(ioaddr + EL3_CMD) >> 13; | |
1853 | EL3WINDOW(4); | |
1854 | media_status = inw(ioaddr + Wn4_Media); | |
1855 | switch (dev->if_port) { | |
1856 | case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx: | |
1857 | if (media_status & Media_LnkBeat) { | |
1858 | netif_carrier_on(dev); | |
1859 | ok = 1; | |
1860 | if (vortex_debug > 1) | |
1861 | printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n", | |
1862 | dev->name, media_tbl[dev->if_port].name, media_status); | |
1863 | } else { | |
1864 | netif_carrier_off(dev); | |
1865 | if (vortex_debug > 1) { | |
1866 | printk(KERN_DEBUG "%s: Media %s has no link beat, %x.\n", | |
1867 | dev->name, media_tbl[dev->if_port].name, media_status); | |
1868 | } | |
1869 | } | |
1870 | break; | |
1871 | case XCVR_MII: case XCVR_NWAY: | |
1872 | { | |
1873 | spin_lock_bh(&vp->lock); | |
1874 | mii_status = mdio_read(dev, vp->phys[0], 1); | |
1875 | ok = 1; | |
1876 | if (vortex_debug > 2) | |
1877 | printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n", | |
1878 | dev->name, mii_status); | |
1879 | if (mii_status & BMSR_LSTATUS) { | |
1880 | int mii_reg5 = mdio_read(dev, vp->phys[0], 5); | |
1881 | if (! vp->force_fd && mii_reg5 != 0xffff) { | |
1882 | int duplex; | |
1883 | ||
1884 | mii_reg5 &= vp->advertising; | |
1885 | duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040; | |
1886 | if (vp->full_duplex != duplex) { | |
1887 | vp->full_duplex = duplex; | |
1888 | printk(KERN_INFO "%s: Setting %s-duplex based on MII " | |
1889 | "#%d link partner capability of %4.4x.\n", | |
1890 | dev->name, vp->full_duplex ? "full" : "half", | |
1891 | vp->phys[0], mii_reg5); | |
1892 | /* Set the full-duplex bit. */ | |
1893 | EL3WINDOW(3); | |
1894 | outw( (vp->full_duplex ? 0x20 : 0) | | |
1895 | (vp->large_frames ? 0x40 : 0) | | |
1896 | ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0), | |
1897 | ioaddr + Wn3_MAC_Ctrl); | |
1898 | if (vortex_debug > 1) | |
1899 | printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n"); | |
1900 | /* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */ | |
1901 | } | |
1902 | } | |
1903 | netif_carrier_on(dev); | |
1904 | } else { | |
1905 | netif_carrier_off(dev); | |
1906 | } | |
1907 | spin_unlock_bh(&vp->lock); | |
1908 | } | |
1909 | break; | |
1910 | default: /* Other media types handled by Tx timeouts. */ | |
1911 | if (vortex_debug > 1) | |
1912 | printk(KERN_DEBUG "%s: Media %s has no indication, %x.\n", | |
1913 | dev->name, media_tbl[dev->if_port].name, media_status); | |
1914 | ok = 1; | |
1915 | } | |
1916 | if ( ! ok) { | |
1917 | unsigned int config; | |
1918 | ||
1919 | do { | |
1920 | dev->if_port = media_tbl[dev->if_port].next; | |
1921 | } while ( ! (vp->available_media & media_tbl[dev->if_port].mask)); | |
1922 | if (dev->if_port == XCVR_Default) { /* Go back to default. */ | |
1923 | dev->if_port = vp->default_media; | |
1924 | if (vortex_debug > 1) | |
1925 | printk(KERN_DEBUG "%s: Media selection failing, using default " | |
1926 | "%s port.\n", | |
1927 | dev->name, media_tbl[dev->if_port].name); | |
1928 | } else { | |
1929 | if (vortex_debug > 1) | |
1930 | printk(KERN_DEBUG "%s: Media selection failed, now trying " | |
1931 | "%s port.\n", | |
1932 | dev->name, media_tbl[dev->if_port].name); | |
1933 | next_tick = media_tbl[dev->if_port].wait; | |
1934 | } | |
1935 | outw((media_status & ~(Media_10TP|Media_SQE)) | | |
1936 | media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); | |
1937 | ||
1938 | EL3WINDOW(3); | |
1939 | config = inl(ioaddr + Wn3_Config); | |
1940 | config = BFINS(config, dev->if_port, 20, 4); | |
1941 | outl(config, ioaddr + Wn3_Config); | |
1942 | ||
1943 | outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax, | |
1944 | ioaddr + EL3_CMD); | |
1945 | if (vortex_debug > 1) | |
1946 | printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config); | |
1947 | /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ | |
1948 | } | |
1949 | EL3WINDOW(old_window); | |
1950 | enable_irq(dev->irq); | |
1951 | ||
1952 | leave_media_alone: | |
1953 | if (vortex_debug > 2) | |
1954 | printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n", | |
1955 | dev->name, media_tbl[dev->if_port].name); | |
1956 | ||
1957 | mod_timer(&vp->timer, RUN_AT(next_tick)); | |
1958 | if (vp->deferred) | |
1959 | outw(FakeIntr, ioaddr + EL3_CMD); | |
1960 | return; | |
1961 | } | |
1962 | ||
1963 | static void vortex_tx_timeout(struct net_device *dev) | |
1964 | { | |
1965 | struct vortex_private *vp = netdev_priv(dev); | |
1966 | long ioaddr = dev->base_addr; | |
1967 | ||
1968 | printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n", | |
1969 | dev->name, inb(ioaddr + TxStatus), | |
1970 | inw(ioaddr + EL3_STATUS)); | |
1971 | EL3WINDOW(4); | |
1972 | printk(KERN_ERR " diagnostics: net %04x media %04x dma %08x fifo %04x\n", | |
1973 | inw(ioaddr + Wn4_NetDiag), | |
1974 | inw(ioaddr + Wn4_Media), | |
1975 | inl(ioaddr + PktStatus), | |
1976 | inw(ioaddr + Wn4_FIFODiag)); | |
1977 | /* Slight code bloat to be user friendly. */ | |
1978 | if ((inb(ioaddr + TxStatus) & 0x88) == 0x88) | |
1979 | printk(KERN_ERR "%s: Transmitter encountered 16 collisions --" | |
1980 | " network cable problem?\n", dev->name); | |
1981 | if (inw(ioaddr + EL3_STATUS) & IntLatch) { | |
1982 | printk(KERN_ERR "%s: Interrupt posted but not delivered --" | |
1983 | " IRQ blocked by another device?\n", dev->name); | |
1984 | /* Bad idea here.. but we might as well handle a few events. */ | |
1985 | { | |
1986 | /* | |
1987 | * Block interrupts because vortex_interrupt does a bare spin_lock() | |
1988 | */ | |
1989 | unsigned long flags; | |
1990 | local_irq_save(flags); | |
1991 | if (vp->full_bus_master_tx) | |
1992 | boomerang_interrupt(dev->irq, dev, NULL); | |
1993 | else | |
1994 | vortex_interrupt(dev->irq, dev, NULL); | |
1995 | local_irq_restore(flags); | |
1996 | } | |
1997 | } | |
1998 | ||
1999 | if (vortex_debug > 0) | |
2000 | dump_tx_ring(dev); | |
2001 | ||
2002 | issue_and_wait(dev, TxReset); | |
2003 | ||
2004 | vp->stats.tx_errors++; | |
2005 | if (vp->full_bus_master_tx) { | |
2006 | printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name); | |
2007 | if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0) | |
2008 | outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc), | |
2009 | ioaddr + DownListPtr); | |
2010 | if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) | |
2011 | netif_wake_queue (dev); | |
2012 | if (vp->drv_flags & IS_BOOMERANG) | |
2013 | outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); | |
2014 | outw(DownUnstall, ioaddr + EL3_CMD); | |
2015 | } else { | |
2016 | vp->stats.tx_dropped++; | |
2017 | netif_wake_queue(dev); | |
2018 | } | |
2019 | ||
2020 | /* Issue Tx Enable */ | |
2021 | outw(TxEnable, ioaddr + EL3_CMD); | |
2022 | dev->trans_start = jiffies; | |
2023 | ||
2024 | /* Switch to register set 7 for normal use. */ | |
2025 | EL3WINDOW(7); | |
2026 | } | |
2027 | ||
2028 | /* | |
2029 | * Handle uncommon interrupt sources. This is a separate routine to minimize | |
2030 | * the cache impact. | |
2031 | */ | |
2032 | static void | |
2033 | vortex_error(struct net_device *dev, int status) | |
2034 | { | |
2035 | struct vortex_private *vp = netdev_priv(dev); | |
2036 | long ioaddr = dev->base_addr; | |
2037 | int do_tx_reset = 0, reset_mask = 0; | |
2038 | unsigned char tx_status = 0; | |
2039 | ||
2040 | if (vortex_debug > 2) { | |
2041 | printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", dev->name, status); | |
2042 | } | |
2043 | ||
2044 | if (status & TxComplete) { /* Really "TxError" for us. */ | |
2045 | tx_status = inb(ioaddr + TxStatus); | |
2046 | /* Presumably a tx-timeout. We must merely re-enable. */ | |
2047 | if (vortex_debug > 2 | |
2048 | || (tx_status != 0x88 && vortex_debug > 0)) { | |
2049 | printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n", | |
2050 | dev->name, tx_status); | |
2051 | if (tx_status == 0x82) { | |
2052 | printk(KERN_ERR "Probably a duplex mismatch. See " | |
2053 | "Documentation/networking/vortex.txt\n"); | |
2054 | } | |
2055 | dump_tx_ring(dev); | |
2056 | } | |
2057 | if (tx_status & 0x14) vp->stats.tx_fifo_errors++; | |
2058 | if (tx_status & 0x38) vp->stats.tx_aborted_errors++; | |
2059 | outb(0, ioaddr + TxStatus); | |
2060 | if (tx_status & 0x30) { /* txJabber or txUnderrun */ | |
2061 | do_tx_reset = 1; | |
2062 | } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */ | |
2063 | do_tx_reset = 1; | |
2064 | reset_mask = 0x0108; /* Reset interface logic, but not download logic */ | |
2065 | } else { /* Merely re-enable the transmitter. */ | |
2066 | outw(TxEnable, ioaddr + EL3_CMD); | |
2067 | } | |
2068 | } | |
2069 | ||
2070 | if (status & RxEarly) { /* Rx early is unused. */ | |
2071 | vortex_rx(dev); | |
2072 | outw(AckIntr | RxEarly, ioaddr + EL3_CMD); | |
2073 | } | |
2074 | if (status & StatsFull) { /* Empty statistics. */ | |
2075 | static int DoneDidThat; | |
2076 | if (vortex_debug > 4) | |
2077 | printk(KERN_DEBUG "%s: Updating stats.\n", dev->name); | |
2078 | update_stats(ioaddr, dev); | |
2079 | /* HACK: Disable statistics as an interrupt source. */ | |
2080 | /* This occurs when we have the wrong media type! */ | |
2081 | if (DoneDidThat == 0 && | |
2082 | inw(ioaddr + EL3_STATUS) & StatsFull) { | |
2083 | printk(KERN_WARNING "%s: Updating statistics failed, disabling " | |
2084 | "stats as an interrupt source.\n", dev->name); | |
2085 | EL3WINDOW(5); | |
2086 | outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD); | |
2087 | vp->intr_enable &= ~StatsFull; | |
2088 | EL3WINDOW(7); | |
2089 | DoneDidThat++; | |
2090 | } | |
2091 | } | |
2092 | if (status & IntReq) { /* Restore all interrupt sources. */ | |
2093 | outw(vp->status_enable, ioaddr + EL3_CMD); | |
2094 | outw(vp->intr_enable, ioaddr + EL3_CMD); | |
2095 | } | |
2096 | if (status & HostError) { | |
2097 | u16 fifo_diag; | |
2098 | EL3WINDOW(4); | |
2099 | fifo_diag = inw(ioaddr + Wn4_FIFODiag); | |
2100 | printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n", | |
2101 | dev->name, fifo_diag); | |
2102 | /* Adapter failure requires Tx/Rx reset and reinit. */ | |
2103 | if (vp->full_bus_master_tx) { | |
2104 | int bus_status = inl(ioaddr + PktStatus); | |
2105 | /* 0x80000000 PCI master abort. */ | |
2106 | /* 0x40000000 PCI target abort. */ | |
2107 | if (vortex_debug) | |
2108 | printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status); | |
2109 | ||
2110 | /* In this case, blow the card away */ | |
2111 | /* Must not enter D3 or we can't legally issue the reset! */ | |
2112 | vortex_down(dev, 0); | |
2113 | issue_and_wait(dev, TotalReset | 0xff); | |
2114 | vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */ | |
2115 | } else if (fifo_diag & 0x0400) | |
2116 | do_tx_reset = 1; | |
2117 | if (fifo_diag & 0x3000) { | |
2118 | /* Reset Rx fifo and upload logic */ | |
2119 | issue_and_wait(dev, RxReset|0x07); | |
2120 | /* Set the Rx filter to the current state. */ | |
2121 | set_rx_mode(dev); | |
2122 | /* enable 802.1q VLAN tagged frames */ | |
2123 | set_8021q_mode(dev, 1); | |
2124 | outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */ | |
2125 | outw(AckIntr | HostError, ioaddr + EL3_CMD); | |
2126 | } | |
2127 | } | |
2128 | ||
2129 | if (do_tx_reset) { | |
2130 | issue_and_wait(dev, TxReset|reset_mask); | |
2131 | outw(TxEnable, ioaddr + EL3_CMD); | |
2132 | if (!vp->full_bus_master_tx) | |
2133 | netif_wake_queue(dev); | |
2134 | } | |
2135 | } | |
2136 | ||
2137 | static int | |
2138 | vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
2139 | { | |
2140 | struct vortex_private *vp = netdev_priv(dev); | |
2141 | long ioaddr = dev->base_addr; | |
2142 | ||
2143 | /* Put out the doubleword header... */ | |
2144 | outl(skb->len, ioaddr + TX_FIFO); | |
2145 | if (vp->bus_master) { | |
2146 | /* Set the bus-master controller to transfer the packet. */ | |
2147 | int len = (skb->len + 3) & ~3; | |
2148 | outl( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), | |
2149 | ioaddr + Wn7_MasterAddr); | |
2150 | outw(len, ioaddr + Wn7_MasterLen); | |
2151 | vp->tx_skb = skb; | |
2152 | outw(StartDMADown, ioaddr + EL3_CMD); | |
2153 | /* netif_wake_queue() will be called at the DMADone interrupt. */ | |
2154 | } else { | |
2155 | /* ... and the packet rounded to a doubleword. */ | |
2156 | outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); | |
2157 | dev_kfree_skb (skb); | |
2158 | if (inw(ioaddr + TxFree) > 1536) { | |
2159 | netif_start_queue (dev); /* AKPM: redundant? */ | |
2160 | } else { | |
2161 | /* Interrupt us when the FIFO has room for max-sized packet. */ | |
2162 | netif_stop_queue(dev); | |
2163 | outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); | |
2164 | } | |
2165 | } | |
2166 | ||
2167 | dev->trans_start = jiffies; | |
2168 | ||
2169 | /* Clear the Tx status stack. */ | |
2170 | { | |
2171 | int tx_status; | |
2172 | int i = 32; | |
2173 | ||
2174 | while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) { | |
2175 | if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */ | |
2176 | if (vortex_debug > 2) | |
2177 | printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n", | |
2178 | dev->name, tx_status); | |
2179 | if (tx_status & 0x04) vp->stats.tx_fifo_errors++; | |
2180 | if (tx_status & 0x38) vp->stats.tx_aborted_errors++; | |
2181 | if (tx_status & 0x30) { | |
2182 | issue_and_wait(dev, TxReset); | |
2183 | } | |
2184 | outw(TxEnable, ioaddr + EL3_CMD); | |
2185 | } | |
2186 | outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ | |
2187 | } | |
2188 | } | |
2189 | return 0; | |
2190 | } | |
2191 | ||
2192 | static int | |
2193 | boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
2194 | { | |
2195 | struct vortex_private *vp = netdev_priv(dev); | |
2196 | long ioaddr = dev->base_addr; | |
2197 | /* Calculate the next Tx descriptor entry. */ | |
2198 | int entry = vp->cur_tx % TX_RING_SIZE; | |
2199 | struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; | |
2200 | unsigned long flags; | |
2201 | ||
2202 | if (vortex_debug > 6) { | |
2203 | printk(KERN_DEBUG "boomerang_start_xmit()\n"); | |
2204 | if (vortex_debug > 3) | |
2205 | printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n", | |
2206 | dev->name, vp->cur_tx); | |
2207 | } | |
2208 | ||
2209 | if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { | |
2210 | if (vortex_debug > 0) | |
2211 | printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n", | |
2212 | dev->name); | |
2213 | netif_stop_queue(dev); | |
2214 | return 1; | |
2215 | } | |
2216 | ||
2217 | vp->tx_skbuff[entry] = skb; | |
2218 | ||
2219 | vp->tx_ring[entry].next = 0; | |
2220 | #if DO_ZEROCOPY | |
2221 | if (skb->ip_summed != CHECKSUM_HW) | |
2222 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); | |
2223 | else | |
2224 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); | |
2225 | ||
2226 | if (!skb_shinfo(skb)->nr_frags) { | |
2227 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | |
2228 | skb->len, PCI_DMA_TODEVICE)); | |
2229 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); | |
2230 | } else { | |
2231 | int i; | |
2232 | ||
2233 | vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, | |
2234 | skb->len-skb->data_len, PCI_DMA_TODEVICE)); | |
2235 | vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len); | |
2236 | ||
2237 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | |
2238 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
2239 | ||
2240 | vp->tx_ring[entry].frag[i+1].addr = | |
2241 | cpu_to_le32(pci_map_single(VORTEX_PCI(vp), | |
2242 | (void*)page_address(frag->page) + frag->page_offset, | |
2243 | frag->size, PCI_DMA_TODEVICE)); | |
2244 | ||
2245 | if (i == skb_shinfo(skb)->nr_frags-1) | |
2246 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG); | |
2247 | else | |
2248 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size); | |
2249 | } | |
2250 | } | |
2251 | #else | |
2252 | vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); | |
2253 | vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); | |
2254 | vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); | |
2255 | #endif | |
2256 | ||
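| /* Stall the download engine while the new descriptor is linked in; DownUnstall below restarts it. */ | |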
2257 | spin_lock_irqsave(&vp->lock, flags); | |
2258 | /* Wait for the stall to complete. */ | |
2259 | issue_and_wait(dev, DownStall); | |
2260 | prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)); | |
2261 | if (inl(ioaddr + DownListPtr) == 0) { | |
2262 | outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr); | |
2263 | vp->queued_packet++; | |
2264 | } | |
2265 | ||
2266 | vp->cur_tx++; | |
2267 | if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) { | |
2268 | netif_stop_queue (dev); | |
2269 | } else { /* Clear previous interrupt enable. */ | |
2270 | #if defined(tx_interrupt_mitigation) | |
2271 | /* Dubious. If the "faster" cyclone ifdef in boomerang_interrupt | |
2272 | * were selected, this would corrupt DN_COMPLETE. No? | |
2273 | */ | |
2274 | prev_entry->status &= cpu_to_le32(~TxIntrUploaded); | |
2275 | #endif | |
2276 | } | |
2277 | outw(DownUnstall, ioaddr + EL3_CMD); | |
2278 | spin_unlock_irqrestore(&vp->lock, flags); | |
2279 | dev->trans_start = jiffies; | |
2280 | return 0; | |
2281 | } | |
2282 | ||
2283 | /* The interrupt handler does all of the Rx thread work and cleans up | |
2284 | after the Tx thread. */ | |
2285 | ||
2286 | /* | |
2287 | * This is the ISR for the vortex series chips. | |
2288 | * full_bus_master_tx == 0 && full_bus_master_rx == 0 | |
2289 | */ | |
2290 | ||
2291 | static irqreturn_t | |
2292 | vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |
2293 | { | |
2294 | struct net_device *dev = dev_id; | |
2295 | struct vortex_private *vp = netdev_priv(dev); | |
2296 | long ioaddr; | |
2297 | int status; | |
2298 | int work_done = max_interrupt_work; | |
2299 | int handled = 0; | |
2300 | ||
2301 | ioaddr = dev->base_addr; | |
2302 | spin_lock(&vp->lock); | |
2303 | ||
2304 | status = inw(ioaddr + EL3_STATUS); | |
2305 | ||
2306 | if (vortex_debug > 6) | |
2307 | printk("vortex_interrupt(). status=0x%4x\n", status); | |
2308 | ||
2309 | if ((status & IntLatch) == 0) | |
2310 | goto handler_exit; /* No interrupt: shared IRQs cause this */ | |
2311 | handled = 1; | |
2312 | ||
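| /* The timer requests an interrupt (FakeIntr) to deliver deferred events: merge them back into the status word. */ | |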
2313 | if (status & IntReq) { | |
2314 | status |= vp->deferred; | |
2315 | vp->deferred = 0; | |
2316 | } | |
2317 | ||
2318 | if (status == 0xffff) /* h/w no longer present (hotplug)? */ | |
2319 | goto handler_exit; | |
2320 | ||
2321 | if (vortex_debug > 4) | |
2322 | printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n", | |
2323 | dev->name, status, inb(ioaddr + Timer)); | |
2324 | ||
2325 | do { | |
2326 | if (vortex_debug > 5) | |
2327 | printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n", | |
2328 | dev->name, status); | |
2329 | if (status & RxComplete) | |
2330 | vortex_rx(dev); | |
2331 | ||
2332 | if (status & TxAvailable) { | |
2333 | if (vortex_debug > 5) | |
2334 | printk(KERN_DEBUG " TX room bit was handled.\n"); | |
2335 | /* There's room in the FIFO for a full-sized packet. */ | |
2336 | outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); | |
2337 | netif_wake_queue (dev); | |
2338 | } | |
2339 | ||
2340 | if (status & DMADone) { | |
2341 | if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) { | |
2342 | outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ | |
2343 | pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); | |
2344 | dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ | |
2345 | if (inw(ioaddr + TxFree) > 1536) { | |
2346 | /* | |
2347 | * AKPM: FIXME: I don't think we need this. If the queue was stopped due to | |
2348 | * insufficient FIFO room, the TxAvailable test will succeed and call | |
2349 | * netif_wake_queue() | |
2350 | */ | |
2351 | netif_wake_queue(dev); | |
2352 | } else { /* Interrupt when FIFO has room for max-sized packet. */ | |
2353 | outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD); | |
2354 | netif_stop_queue(dev); | |
2355 | } | |
2356 | } | |
2357 | } | |
2358 | /* Check for all uncommon interrupts at once. */ | |
2359 | if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { | |
2360 | if (status == 0xffff) | |
2361 | break; | |
2362 | vortex_error(dev, status); | |
2363 | } | |
2364 | ||
2365 | if (--work_done < 0) { | |
2366 | printk(KERN_WARNING "%s: Too much work in interrupt, status " | |
2367 | "%4.4x.\n", dev->name, status); | |
2368 | /* Disable all pending interrupts. */ | |
2369 | do { | |
2370 | vp->deferred |= status; | |
2371 | outw(SetStatusEnb | (~vp->deferred & vp->status_enable), | |
2372 | ioaddr + EL3_CMD); | |
2373 | outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); | |
2374 | } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch); | |
2375 | /* The timer will reenable interrupts. */ | |
2376 | mod_timer(&vp->timer, jiffies + 1*HZ); | |
2377 | break; | |
2378 | } | |
2379 | /* Acknowledge the IRQ. */ | |
2380 | outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); | |
2381 | } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete)); | |
2382 | ||
2383 | if (vortex_debug > 4) | |
2384 | printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", | |
2385 | dev->name, status); | |
2386 | handler_exit: | |
2387 | spin_unlock(&vp->lock); | |
2388 | return IRQ_RETVAL(handled); | |
2389 | } | |
2390 | ||
2391 | /* | |
2392 | * This is the ISR for the boomerang series chips. | |
2393 | * full_bus_master_tx == 1 && full_bus_master_rx == 1 | |
2394 | */ | |
2395 | ||
2396 | static irqreturn_t | |
2397 | boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |
2398 | { | |
2399 | struct net_device *dev = dev_id; | |
2400 | struct vortex_private *vp = netdev_priv(dev); | |
2401 | long ioaddr; | |
2402 | int status; | |
2403 | int work_done = max_interrupt_work; | |
2404 | ||
2405 | ioaddr = dev->base_addr; | |
2406 | ||
2407 | /* | |
2408 | * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout | |
2409 | * and boomerang_start_xmit. | |
2410 | */ | |
2411 | spin_lock(&vp->lock); | |
2412 | ||
2413 | status = inw(ioaddr + EL3_STATUS); | |
2414 | ||
2415 | if (vortex_debug > 6) | |
2416 | printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status); | |
2417 | ||
2418 | if ((status & IntLatch) == 0) | |
2419 | goto handler_exit; /* No interrupt: shared IRQs can cause this */ | |
2420 | ||
2421 | if (status == 0xffff) { /* h/w no longer present (hotplug)? */ | |
2422 | if (vortex_debug > 1) | |
2423 | printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n"); | |
2424 | goto handler_exit; | |
2425 | } | |
2426 | ||
2427 | if (status & IntReq) { | |
2428 | status |= vp->deferred; | |
2429 | vp->deferred = 0; | |
2430 | } | |
2431 | ||
2432 | if (vortex_debug > 4) | |
2433 | printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n", | |
2434 | dev->name, status, inb(ioaddr + Timer)); | |
2435 | do { | |
2436 | if (vortex_debug > 5) | |
2437 | printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n", | |
2438 | dev->name, status); | |
2439 | if (status & UpComplete) { | |
2440 | outw(AckIntr | UpComplete, ioaddr + EL3_CMD); | |
2441 | if (vortex_debug > 5) | |
2442 | printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n"); | |
2443 | boomerang_rx(dev); | |
2444 | } | |
2445 | ||
2446 | if (status & DownComplete) { | |
2447 | unsigned int dirty_tx = vp->dirty_tx; | |
2448 | ||
2449 | outw(AckIntr | DownComplete, ioaddr + EL3_CMD); | |
2450 | while (vp->cur_tx - dirty_tx > 0) { | |
2451 | int entry = dirty_tx % TX_RING_SIZE; | |
2452 | #if 1 /* AKPM: the latter is faster, but cyclone-only */ | |
2453 | if (inl(ioaddr + DownListPtr) == | |
2454 | vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc)) | |
2455 | break; /* It still hasn't been processed. */ | |
2456 | #else | |
2457 | if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0) | |
2458 | break; /* It still hasn't been processed. */ | |
2459 | #endif | |
2460 | ||
2461 | if (vp->tx_skbuff[entry]) { | |
2462 | struct sk_buff *skb = vp->tx_skbuff[entry]; | |
2463 | #if DO_ZEROCOPY | |
2464 | int i; | |
2465 | for (i=0; i<=skb_shinfo(skb)->nr_frags; i++) | |
2466 | pci_unmap_single(VORTEX_PCI(vp), | |
2467 | le32_to_cpu(vp->tx_ring[entry].frag[i].addr), | |
2468 | le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, | |
2469 | PCI_DMA_TODEVICE); | |
2470 | #else | |
2471 | pci_unmap_single(VORTEX_PCI(vp), | |
2472 | le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); | |
2473 | #endif | |
2474 | dev_kfree_skb_irq(skb); | |
2475 | vp->tx_skbuff[entry] = NULL; | |
2476 | } else { | |
2477 | printk(KERN_DEBUG "boomerang_interrupt: no skb!\n"); | |
2478 | } | |
2479 | /* vp->stats.tx_packets++; Counted below. */ | |
2480 | dirty_tx++; | |
2481 | } | |
2482 | vp->dirty_tx = dirty_tx; | |
2483 | if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) { | |
2484 | if (vortex_debug > 6) | |
2485 | printk(KERN_DEBUG "boomerang_interrupt: wake queue\n"); | |
2486 | netif_wake_queue (dev); | |
2487 | } | |
2488 | } | |
2489 | ||
2490 | /* Check for all uncommon interrupts at once. */ | |
2491 | if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) | |
2492 | vortex_error(dev, status); | |
2493 | ||
2494 | if (--work_done < 0) { | |
2495 | printk(KERN_WARNING "%s: Too much work in interrupt, status " | |
2496 | "%4.4x.\n", dev->name, status); | |
2497 | /* Disable all pending interrupts. */ | |
2498 | do { | |
2499 | vp->deferred |= status; | |
2500 | outw(SetStatusEnb | (~vp->deferred & vp->status_enable), | |
2501 | ioaddr + EL3_CMD); | |
2502 | outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD); | |
2503 | } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch); | |
2504 | /* The timer will reenable interrupts. */ | |
2505 | mod_timer(&vp->timer, jiffies + 1*HZ); | |
2506 | break; | |
2507 | } | |
2508 | /* Acknowledge the IRQ. */ | |
2509 | outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); | |
2510 | if (vp->cb_fn_base) /* The PCMCIA people are idiots. */ | |
2511 | writel(0x8000, vp->cb_fn_base + 4); | |
2512 | ||
2513 | } while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch); | |
2514 | ||
2515 | if (vortex_debug > 4) | |
2516 | printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", | |
2517 | dev->name, status); | |
2518 | handler_exit: | |
2519 | spin_unlock(&vp->lock); | |
2520 | return IRQ_HANDLED; | |
2521 | } | |
2522 | ||
2523 | static int vortex_rx(struct net_device *dev) | |
2524 | { | |
2525 | struct vortex_private *vp = netdev_priv(dev); | |
2526 | long ioaddr = dev->base_addr; | |
2527 | int i; | |
2528 | short rx_status; | |
2529 | ||
2530 | if (vortex_debug > 5) | |
2531 | printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n", | |
2532 | inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); | |
2533 | while ((rx_status = inw(ioaddr + RxStatus)) > 0) { | |
2534 | if (rx_status & 0x4000) { /* Error, update stats. */ | |
2535 | unsigned char rx_error = inb(ioaddr + RxErrors); | |
2536 | if (vortex_debug > 2) | |
2537 | printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); | |
2538 | vp->stats.rx_errors++; | |
2539 | if (rx_error & 0x01) vp->stats.rx_over_errors++; | |
2540 | if (rx_error & 0x02) vp->stats.rx_length_errors++; | |
2541 | if (rx_error & 0x04) vp->stats.rx_frame_errors++; | |
2542 | if (rx_error & 0x08) vp->stats.rx_crc_errors++; | |
2543 | if (rx_error & 0x10) vp->stats.rx_length_errors++; | |
2544 | } else { | |
2545 | /* The packet length: up to 4.5K! */ | |
2546 | int pkt_len = rx_status & 0x1fff; | |
2547 | struct sk_buff *skb; | |
2548 | ||
2549 | skb = dev_alloc_skb(pkt_len + 5); | |
2550 | if (vortex_debug > 4) | |
2551 | printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", | |
2552 | pkt_len, rx_status); | |
2553 | if (skb != NULL) { | |
2554 | skb->dev = dev; | |
2555 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | |
2556 | /* 'skb_put()' points to the start of sk_buff data area. */ | |
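| /* If the Rx DMA engine is idle, pull the packet from the FIFO with bus-master DMA; otherwise fall back to programmed I/O. */ | |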
2557 | if (vp->bus_master && | |
2558 | ! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) { | |
2559 | dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), | |
2560 | pkt_len, PCI_DMA_FROMDEVICE); | |
2561 | outl(dma, ioaddr + Wn7_MasterAddr); | |
2562 | outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); | |
2563 | outw(StartDMAUp, ioaddr + EL3_CMD); | |
2564 | while (inw(ioaddr + Wn7_MasterStatus) & 0x8000) | |
2565 | ; | |
2566 | pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); | |
2567 | } else { | |
2568 | insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len), | |
2569 | (pkt_len + 3) >> 2); | |
2570 | } | |
2571 | outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ | |
2572 | skb->protocol = eth_type_trans(skb, dev); | |
2573 | netif_rx(skb); | |
2574 | dev->last_rx = jiffies; | |
2575 | vp->stats.rx_packets++; | |
2576 | /* Wait a limited time to go to next packet. */ | |
2577 | for (i = 200; i >= 0; i--) | |
2578 | if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress)) | |
2579 | break; | |
2580 | continue; | |
2581 | } else if (vortex_debug > 0) | |
2582 | printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of " | |
2583 | "size %d.\n", dev->name, pkt_len); | |
2584 | } | |
2585 | vp->stats.rx_dropped++; | |
2586 | issue_and_wait(dev, RxDiscard); | |
2587 | } | |
2588 | ||
2589 | return 0; | |
2590 | } | |
2591 | ||
2592 | static int | |
2593 | boomerang_rx(struct net_device *dev) | |
2594 | { | |
2595 | struct vortex_private *vp = netdev_priv(dev); | |
2596 | int entry = vp->cur_rx % RX_RING_SIZE; | |
2597 | long ioaddr = dev->base_addr; | |
2598 | int rx_status; | |
2599 | int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; | |
2600 | ||
2601 | if (vortex_debug > 5) | |
2602 | printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS)); | |
2603 | ||
2604 | while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){ | |
2605 | if (--rx_work_limit < 0) | |
2606 | break; | |
2607 | if (rx_status & RxDError) { /* Error, update stats. */ | |
2608 | unsigned char rx_error = rx_status >> 16; | |
2609 | if (vortex_debug > 2) | |
2610 | printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error); | |
2611 | vp->stats.rx_errors++; | |
2612 | if (rx_error & 0x01) vp->stats.rx_over_errors++; | |
2613 | if (rx_error & 0x02) vp->stats.rx_length_errors++; | |
2614 | if (rx_error & 0x04) vp->stats.rx_frame_errors++; | |
2615 | if (rx_error & 0x08) vp->stats.rx_crc_errors++; | |
2616 | if (rx_error & 0x10) vp->stats.rx_length_errors++; | |
2617 | } else { | |
2618 | /* The packet length: up to 4.5K! */ | |
2619 | int pkt_len = rx_status & 0x1fff; | |
2620 | struct sk_buff *skb; | |
2621 | dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); | |
2622 | ||
2623 | if (vortex_debug > 4) | |
2624 | printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n", | |
2625 | pkt_len, rx_status); | |
2626 | ||
2627 | /* Check if the packet is long enough to just accept without | |
2628 | copying to a properly sized skbuff. */ | |
2629 | if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) { | |
2630 | skb->dev = dev; | |
2631 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | |
2632 | pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | |
2633 | /* 'skb_put()' points to the start of sk_buff data area. */ | |
2634 | memcpy(skb_put(skb, pkt_len), | |
2635 | vp->rx_skbuff[entry]->tail, | |
2636 | pkt_len); | |
2637 | pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | |
2638 | vp->rx_copy++; | |
2639 | } else { | |
2640 | /* Pass up the skbuff already on the Rx ring. */ | |
2641 | skb = vp->rx_skbuff[entry]; | |
2642 | vp->rx_skbuff[entry] = NULL; | |
2643 | skb_put(skb, pkt_len); | |
2644 | pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | |
2645 | vp->rx_nocopy++; | |
2646 | } | |
2647 | skb->protocol = eth_type_trans(skb, dev); | |
2648 | { /* Use hardware checksum info. */ | |
2649 | int csum_bits = rx_status & 0xee000000; | |
2650 | if (csum_bits && | |
2651 | (csum_bits == (IPChksumValid | TCPChksumValid) || | |
2652 | csum_bits == (IPChksumValid | UDPChksumValid))) { | |
2653 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
2654 | vp->rx_csumhits++; | |
2655 | } | |
2656 | } | |
2657 | netif_rx(skb); | |
2658 | dev->last_rx = jiffies; | |
2659 | vp->stats.rx_packets++; | |
2660 | } | |
2661 | entry = (++vp->cur_rx) % RX_RING_SIZE; | |
2662 | } | |
2663 | /* Refill the Rx ring buffers. */ | |
2664 | for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { | |
2665 | struct sk_buff *skb; | |
2666 | entry = vp->dirty_rx % RX_RING_SIZE; | |
2667 | if (vp->rx_skbuff[entry] == NULL) { | |
2668 | skb = dev_alloc_skb(PKT_BUF_SZ); | |
2669 | if (skb == NULL) { | |
2670 | static unsigned long last_jif; | |
2671 | if ((jiffies - last_jif) > 10 * HZ) { | |
2672 | printk(KERN_WARNING "%s: memory shortage\n", dev->name); | |
2673 | last_jif = jiffies; | |
2674 | } | |
2675 | if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) | |
2676 | mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); | |
2677 | break; /* Bad news! */ | |
2678 | } | |
2679 | skb->dev = dev; /* Mark as being used by this device. */ | |
2680 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | |
2681 | vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); | |
2682 | vp->rx_skbuff[entry] = skb; | |
2683 | } | |
2684 | vp->rx_ring[entry].status = 0; /* Clear complete bit. */ | |
2685 | outw(UpUnstall, ioaddr + EL3_CMD); | |
2686 | } | |
2687 | return 0; | |
2688 | } | |
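/* Aside (illustrative sketch, not driver code): the rx_copybreak test above
   trades copying small frames into a right-sized skb against handing the
   full PKT_BUF_SZ ring buffer up the stack and having to allocate a
   replacement.  The policy in isolation, assuming only the rx_copybreak
   variable used above: */
#if 0
static inline int rx_should_copy(int pkt_len)
{
	/* Copy short frames; recycle the big ring buffer for the next packet. */
	return pkt_len < rx_copybreak;
}
#endif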
2689 | ||
2690 | /* | |
2691 | * If we've hit a total OOM refilling the Rx ring we poll once a second | |
2692 | * for some memory. Otherwise there is no way to restart the rx process. | |
2693 | */ | |
2694 | static void | |
2695 | rx_oom_timer(unsigned long arg) | |
2696 | { | |
2697 | struct net_device *dev = (struct net_device *)arg; | |
2698 | struct vortex_private *vp = netdev_priv(dev); | |
2699 | ||
2700 | spin_lock_irq(&vp->lock); | |
2701 | if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ | |
2702 | boomerang_rx(dev); | |
2703 | if (vortex_debug > 1) { | |
2704 | printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name, | |
2705 | ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying"); | |
2706 | } | |
2707 | spin_unlock_irq(&vp->lock); | |
2708 | } | |
2709 | ||
2710 | static void | |
2711 | vortex_down(struct net_device *dev, int final_down) | |
2712 | { | |
2713 | struct vortex_private *vp = netdev_priv(dev); | |
2714 | long ioaddr = dev->base_addr; | |
2715 | ||
2716 | netif_stop_queue (dev); | |
2717 | ||
2718 | del_timer_sync(&vp->rx_oom_timer); | |
2719 | del_timer_sync(&vp->timer); | |
2720 | ||
2721 | /* Turn off statistics ASAP. We update vp->stats below. */ | |
2722 | outw(StatsDisable, ioaddr + EL3_CMD); | |
2723 | ||
2724 | /* Disable the receiver and transmitter. */ | |
2725 | outw(RxDisable, ioaddr + EL3_CMD); | |
2726 | outw(TxDisable, ioaddr + EL3_CMD); | |
2727 | ||
2728 | /* Disable receiving 802.1q tagged frames */ | |
2729 | set_8021q_mode(dev, 0); | |
2730 | ||
2731 | if (dev->if_port == XCVR_10base2) | |
2732 | /* Turn off thinnet power. Green! */ | |
2733 | outw(StopCoax, ioaddr + EL3_CMD); | |
2734 | ||
2735 | outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); | |
2736 | ||
2737 | update_stats(ioaddr, dev); | |
2738 | if (vp->full_bus_master_rx) | |
2739 | outl(0, ioaddr + UpListPtr); | |
2740 | if (vp->full_bus_master_tx) | |
2741 | outl(0, ioaddr + DownListPtr); | |
2742 | ||
2743 | if (final_down && VORTEX_PCI(vp)) { | |
2744 | pci_save_state(VORTEX_PCI(vp)); | |
2745 | acpi_set_WOL(dev); | |
2746 | } | |
2747 | } | |
2748 | ||
2749 | static int | |
2750 | vortex_close(struct net_device *dev) | |
2751 | { | |
2752 | struct vortex_private *vp = netdev_priv(dev); | |
2753 | long ioaddr = dev->base_addr; | |
2754 | int i; | |
2755 | ||
2756 | if (netif_device_present(dev)) | |
2757 | vortex_down(dev, 1); | |
2758 | ||
2759 | if (vortex_debug > 1) { | |
2760 | printk(KERN_DEBUG "%s: vortex_close() status %4.4x, Tx status %2.2x.\n", | |
2761 | dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus)); | |
2762 | printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d" | |
2763 | " tx_queued %d Rx pre-checksummed %d.\n", | |
2764 | dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits); | |
2765 | } | |
2766 | ||
2767 | #if DO_ZEROCOPY | |
2768 | if ( vp->rx_csumhits && | |
2769 | ((vp->drv_flags & HAS_HWCKSM) == 0) && | |
2770 | (hw_checksums[vp->card_idx] == -1)) { | |
2771 | printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", dev->name); | |
2772 | } | |
2773 | #endif | |
2774 | ||
2775 | free_irq(dev->irq, dev); | |
2776 | ||
2777 | if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ | |
2778 | for (i = 0; i < RX_RING_SIZE; i++) | |
2779 | if (vp->rx_skbuff[i]) { | |
2780 | pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), | |
2781 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | |
2782 | dev_kfree_skb(vp->rx_skbuff[i]); | |
2783 | vp->rx_skbuff[i] = NULL; | |
2784 | } | |
2785 | } | |
2786 | if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ | |
2787 | for (i = 0; i < TX_RING_SIZE; i++) { | |
2788 | if (vp->tx_skbuff[i]) { | |
2789 | struct sk_buff *skb = vp->tx_skbuff[i]; | |
2790 | #if DO_ZEROCOPY | |
2791 | int k; | |
2792 | ||
2793 | for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) | |
2794 | pci_unmap_single(VORTEX_PCI(vp), | |
2795 | le32_to_cpu(vp->tx_ring[i].frag[k].addr), | |
2796 | le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, | |
2797 | PCI_DMA_TODEVICE); | |
2798 | #else | |
2799 | pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); | |
2800 | #endif | |
2801 | dev_kfree_skb(skb); | |
2802 | vp->tx_skbuff[i] = NULL; | |
2803 | } | |
2804 | } | |
2805 | } | |
2806 | ||
2807 | return 0; | |
2808 | } | |
2809 | ||
2810 | static void | |
2811 | dump_tx_ring(struct net_device *dev) | |
2812 | { | |
2813 | if (vortex_debug > 0) { | |
2814 | struct vortex_private *vp = netdev_priv(dev); | |
2815 | long ioaddr = dev->base_addr; | |
2816 | ||
2817 | if (vp->full_bus_master_tx) { | |
2818 | int i; | |
2819 | int stalled = inl(ioaddr + PktStatus) & 0x04; /* Possibly racy, but it's only debug stuff */ | |
2820 | ||
2821 | printk(KERN_ERR " Flags: bus-master %d, dirty %d(%d) current %d(%d)\n", | |
2822 | vp->full_bus_master_tx, | |
2823 | vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE, | |
2824 | vp->cur_tx, vp->cur_tx % TX_RING_SIZE); | |
2825 | printk(KERN_ERR " Transmit list %8.8x vs. %p.\n", | |
2826 | inl(ioaddr + DownListPtr), | |
2827 | &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]); | |
2828 | issue_and_wait(dev, DownStall); | |
2829 | for (i = 0; i < TX_RING_SIZE; i++) { | |
2830 | printk(KERN_ERR " %d: @%p length %8.8x status %8.8x\n", i, | |
2831 | &vp->tx_ring[i], | |
2832 | #if DO_ZEROCOPY | |
2833 | le32_to_cpu(vp->tx_ring[i].frag[0].length), | |
2834 | #else | |
2835 | le32_to_cpu(vp->tx_ring[i].length), | |
2836 | #endif | |
2837 | le32_to_cpu(vp->tx_ring[i].status)); | |
2838 | } | |
2839 | if (!stalled) | |
2840 | outw(DownUnstall, ioaddr + EL3_CMD); | |
2841 | } | |
2842 | } | |
2843 | } | |
2844 | ||
2845 | static struct net_device_stats *vortex_get_stats(struct net_device *dev) | |
2846 | { | |
2847 | struct vortex_private *vp = netdev_priv(dev); | |
2848 | unsigned long flags; | |
2849 | ||
2850 | if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */ | |
2851 | spin_lock_irqsave (&vp->lock, flags); | |
2852 | update_stats(dev->base_addr, dev); | |
2853 | spin_unlock_irqrestore (&vp->lock, flags); | |
2854 | } | |
2855 | return &vp->stats; | |
2856 | } | |
2857 | ||
2858 | /* Update statistics. | |
2859 | Unlike with the EL3 we need not worry about interrupts changing | |
2860 | the window setting from underneath us, but we must still guard | |
2861 | against a race condition with a StatsUpdate interrupt updating the | |
2862 | table. This is done by checking that the assembly code generated (!) | |
2863 | for the '+=' updates performs them atomically. | |
2864 | */ | |
2865 | static void update_stats(long ioaddr, struct net_device *dev) | |
2866 | { | |
2867 | struct vortex_private *vp = netdev_priv(dev); | |
2868 | int old_window = inw(ioaddr + EL3_CMD); | |
2869 | ||
2870 | if (old_window == 0xffff) /* Chip suspended or ejected. */ | |
2871 | return; | |
2872 | /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ | |
2873 | /* Switch to the stats window, and read everything. */ | |
2874 | EL3WINDOW(6); | |
2875 | vp->stats.tx_carrier_errors += inb(ioaddr + 0); | |
2876 | vp->stats.tx_heartbeat_errors += inb(ioaddr + 1); | |
2877 | vp->stats.collisions += inb(ioaddr + 3); | |
2878 | vp->stats.tx_window_errors += inb(ioaddr + 4); | |
2879 | vp->stats.rx_fifo_errors += inb(ioaddr + 5); | |
2880 | vp->stats.tx_packets += inb(ioaddr + 6); | |
2881 | vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4; | |
2882 | /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */ | |
2883 | /* Don't bother with register 9, an extension of registers 6&7. | |
2884 | If we do use the 6&7 values the atomic update assumption above | |
2885 | is invalid. */ | |
2886 | vp->stats.rx_bytes += inw(ioaddr + 10); | |
2887 | vp->stats.tx_bytes += inw(ioaddr + 12); | |
2888 | /* Extra stats for get_ethtool_stats() */ | |
2889 | vp->xstats.tx_multiple_collisions += inb(ioaddr + 2); | |
2890 | vp->xstats.tx_deferred += inb(ioaddr + 8); | |
2891 | EL3WINDOW(4); | |
2892 | vp->xstats.rx_bad_ssd += inb(ioaddr + 12); | |
2893 | ||
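/* Window 4, register 13 carries the upper four bits of each byte counter:
   the low nibble extends rx_bytes and the high nibble extends tx_bytes,
   hence the <<16 and <<12 shifts below. */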
2894 | { | |
2895 | u8 up = inb(ioaddr + 13); | |
2896 | vp->stats.rx_bytes += (up & 0x0f) << 16; | |
2897 | vp->stats.tx_bytes += (up & 0xf0) << 12; | |
2898 | } | |
2899 | ||
2900 | EL3WINDOW(old_window >> 13); | |
2901 | return; | |
2902 | } | |
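/* Note: both callers in this file (vortex_get_stats() and
   vortex_get_ethtool_stats()) take vp->lock around update_stats(), since
   EL3WINDOW() switches the chip's shared register window. */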
2903 | ||
2904 | static int vortex_nway_reset(struct net_device *dev) | |
2905 | { | |
2906 | struct vortex_private *vp = netdev_priv(dev); | |
2907 | long ioaddr = dev->base_addr; | |
2908 | unsigned long flags; | |
2909 | int rc; | |
2910 | ||
2911 | spin_lock_irqsave(&vp->lock, flags); | |
2912 | EL3WINDOW(4); | |
2913 | rc = mii_nway_restart(&vp->mii); | |
2914 | spin_unlock_irqrestore(&vp->lock, flags); | |
2915 | return rc; | |
2916 | } | |
2917 | ||
2918 | static u32 vortex_get_link(struct net_device *dev) | |
2919 | { | |
2920 | struct vortex_private *vp = netdev_priv(dev); | |
2921 | long ioaddr = dev->base_addr; | |
2922 | unsigned long flags; | |
2923 | int rc; | |
2924 | ||
2925 | spin_lock_irqsave(&vp->lock, flags); | |
2926 | EL3WINDOW(4); | |
2927 | rc = mii_link_ok(&vp->mii); | |
2928 | spin_unlock_irqrestore(&vp->lock, flags); | |
2929 | return rc; | |
2930 | } | |
2931 | ||
2932 | static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
2933 | { | |
2934 | struct vortex_private *vp = netdev_priv(dev); | |
2935 | long ioaddr = dev->base_addr; | |
2936 | unsigned long flags; | |
2937 | int rc; | |
2938 | ||
2939 | spin_lock_irqsave(&vp->lock, flags); | |
2940 | EL3WINDOW(4); | |
2941 | rc = mii_ethtool_gset(&vp->mii, cmd); | |
2942 | spin_unlock_irqrestore(&vp->lock, flags); | |
2943 | return rc; | |
2944 | } | |
2945 | ||
2946 | static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
2947 | { | |
2948 | struct vortex_private *vp = netdev_priv(dev); | |
2949 | long ioaddr = dev->base_addr; | |
2950 | unsigned long flags; | |
2951 | int rc; | |
2952 | ||
2953 | spin_lock_irqsave(&vp->lock, flags); | |
2954 | EL3WINDOW(4); | |
2955 | rc = mii_ethtool_sset(&vp->mii, cmd); | |
2956 | spin_unlock_irqrestore(&vp->lock, flags); | |
2957 | return rc; | |
2958 | } | |
2959 | ||
2960 | static u32 vortex_get_msglevel(struct net_device *dev) | |
2961 | { | |
2962 | return vortex_debug; | |
2963 | } | |
2964 | ||
2965 | static void vortex_set_msglevel(struct net_device *dev, u32 dbg) | |
2966 | { | |
2967 | vortex_debug = dbg; | |
2968 | } | |
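/* These two hooks expose the global vortex_debug level through ethtool, so
   the driver's verbosity can be changed at run time, e.g.
   "ethtool -s eth0 msglvl 2" (the interface name is just an example). */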
2969 | ||
2970 | static int vortex_get_stats_count(struct net_device *dev) | |
2971 | { | |
2972 | return VORTEX_NUM_STATS; | |
2973 | } | |
2974 | ||
2975 | static void vortex_get_ethtool_stats(struct net_device *dev, | |
2976 | struct ethtool_stats *stats, u64 *data) | |
2977 | { | |
2978 | struct vortex_private *vp = netdev_priv(dev); | |
2979 | unsigned long flags; | |
2980 | ||
2981 | spin_lock_irqsave(&vp->lock, flags); | |
2982 | update_stats(dev->base_addr, dev); | |
2983 | spin_unlock_irqrestore(&vp->lock, flags); | |
2984 | ||
2985 | data[0] = vp->xstats.tx_deferred; | |
2986 | data[1] = vp->xstats.tx_multiple_collisions; | |
2987 | data[2] = vp->xstats.rx_bad_ssd; | |
2988 | } | |
2989 | ||
2990 | ||
2991 | static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data) | |
2992 | { | |
2993 | switch (stringset) { | |
2994 | case ETH_SS_STATS: | |
2995 | memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); | |
2996 | break; | |
2997 | default: | |
2998 | WARN_ON(1); | |
2999 | break; | |
3000 | } | |
3001 | } | |
3002 | ||
3003 | static void vortex_get_drvinfo(struct net_device *dev, | |
3004 | struct ethtool_drvinfo *info) | |
3005 | { | |
3006 | struct vortex_private *vp = netdev_priv(dev); | |
3007 | ||
3008 | strcpy(info->driver, DRV_NAME); | |
3009 | strcpy(info->version, DRV_VERSION); | |
3010 | if (VORTEX_PCI(vp)) { | |
3011 | strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); | |
3012 | } else { | |
3013 | if (VORTEX_EISA(vp)) | |
3014 | strcpy(info->bus_info, vp->gendev->bus_id); | |
3015 | else | |
3016 | sprintf(info->bus_info, "EISA 0x%lx %d", | |
3017 | dev->base_addr, dev->irq); | |
3018 | } | |
3019 | } | |
3020 | ||
3021 | static struct ethtool_ops vortex_ethtool_ops = { | |
3022 | .get_drvinfo = vortex_get_drvinfo, | |
3023 | .get_strings = vortex_get_strings, | |
3024 | .get_msglevel = vortex_get_msglevel, | |
3025 | .set_msglevel = vortex_set_msglevel, | |
3026 | .get_ethtool_stats = vortex_get_ethtool_stats, | |
3027 | .get_stats_count = vortex_get_stats_count, | |
3028 | .get_settings = vortex_get_settings, | |
3029 | .set_settings = vortex_set_settings, | |
3030 | .get_link = vortex_get_link, | |
3031 | .nway_reset = vortex_nway_reset, | |
3032 | }; | |
3033 | ||
3034 | #ifdef CONFIG_PCI | |
3035 | /* | |
3036 | * Must power the device up to do MDIO operations | |
3037 | */ | |
3038 | static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
3039 | { | |
3040 | int err; | |
3041 | struct vortex_private *vp = netdev_priv(dev); | |
3042 | long ioaddr = dev->base_addr; | |
3043 | unsigned long flags; | |
3044 | int state = 0; | |
3045 | ||
3046 | if(VORTEX_PCI(vp)) | |
3047 | state = VORTEX_PCI(vp)->current_state; | |
3048 | ||
3049 | /* The kernel core really should have pci_get_power_state() */ | |
3050 | ||
3051 | if(state != 0) | |
3052 | pci_set_power_state(VORTEX_PCI(vp), PCI_D0); | |
3053 | spin_lock_irqsave(&vp->lock, flags); | |
3054 | EL3WINDOW(4); | |
3055 | err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); | |
3056 | spin_unlock_irqrestore(&vp->lock, flags); | |
3057 | if(state != 0) | |
3058 | pci_set_power_state(VORTEX_PCI(vp), state); | |
3059 | ||
3060 | return err; | |
3061 | } | |
3062 | #endif | |
3063 | ||
3064 | ||
3065 | /* Pre-Cyclone chips have no documented multicast filter, so the only | |
3066 | multicast setting is to receive all multicast frames. At least | |
3067 | the chip has a very clean way to set the mode, unlike many others. */ | |
3068 | static void set_rx_mode(struct net_device *dev) | |
3069 | { | |
3070 | long ioaddr = dev->base_addr; | |
3071 | int new_mode; | |
3072 | ||
3073 | if (dev->flags & IFF_PROMISC) { | |
3074 | if (vortex_debug > 0) | |
3075 | printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name); | |
3076 | new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; | |
3077 | } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) { | |
3078 | new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; | |
3079 | } else | |
3080 | new_mode = SetRxFilter | RxStation | RxBroadcast; | |
3081 | ||
3082 | outw(new_mode, ioaddr + EL3_CMD); | |
3083 | } | |
3084 | ||
3085 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | |
3086 | /* Setup the card so that it can receive frames with an 802.1q VLAN tag. | |
3087 | Note that this must be done after each RxReset due to some backwards | |
3088 | compatibility logic in the Cyclone and Tornado ASICs */ | |
3089 | ||
3090 | /* The Ethernet Type used for 802.1q tagged frames */ | |
3091 | #define VLAN_ETHER_TYPE 0x8100 | |
3092 | ||
3093 | static void set_8021q_mode(struct net_device *dev, int enable) | |
3094 | { | |
3095 | struct vortex_private *vp = netdev_priv(dev); | |
3096 | long ioaddr = dev->base_addr; | |
3097 | int old_window = inw(ioaddr + EL3_CMD); | |
3098 | int mac_ctrl; | |
3099 | ||
3100 | if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) { | |
3101 | /* cyclone and tornado chipsets can recognize 802.1q | |
3102 | * tagged frames and treat them correctly */ | |
3103 | ||
3104 | int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */ | |
3105 | if (enable) | |
3106 | max_pkt_size += 4; /* 802.1Q VLAN tag */ | |
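/* e.g. with the default 1500 byte MTU this gives 1514 bytes, or
   1518 bytes once the 4 byte 802.1Q tag is allowed for. */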
3107 | ||
3108 | EL3WINDOW(3); | |
3109 | outw(max_pkt_size, ioaddr+Wn3_MaxPktSize); | |
3110 | ||
3111 | /* set VlanEtherType to let the hardware checksumming | |
3112 | treat tagged frames correctly */ | |
3113 | EL3WINDOW(7); | |
3114 | outw(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType); | |
3115 | } else { | |
3116 | /* on older cards we have to enable large frames */ | |
3117 | ||
3118 | vp->large_frames = dev->mtu > 1500 || enable; | |
3119 | ||
3120 | EL3WINDOW(3); | |
3121 | mac_ctrl = inw(ioaddr+Wn3_MAC_Ctrl); | |
3122 | if (vp->large_frames) | |
3123 | mac_ctrl |= 0x40; | |
3124 | else | |
3125 | mac_ctrl &= ~0x40; | |
3126 | outw(mac_ctrl, ioaddr+Wn3_MAC_Ctrl); | |
3127 | } | |
3128 | ||
3129 | EL3WINDOW(old_window); | |
3130 | } | |
3131 | #else | |
3132 | ||
3133 | static void set_8021q_mode(struct net_device *dev, int enable) | |
3134 | { | |
3135 | } | |
3136 | ||
3137 | ||
3138 | #endif | |
3139 | ||
3140 | /* MII transceiver control section. | |
3141 | Read and write the MII registers using software-generated serial | |
3142 | MDIO protocol. See the MII specifications or DP83840A data sheet | |
3143 | for details. */ | |
3144 | ||
3145 | /* The maximum data clock rate is 2.5 MHz. The minimum timing is usually | |
3146 | met by back-to-back PCI I/O cycles, but we insert a delay to avoid | |
3147 | "overclocking" issues. */ | |
3148 | #define mdio_delay() inl(mdio_addr) | |
3149 | ||
3150 | #define MDIO_SHIFT_CLK 0x01 | |
3151 | #define MDIO_DIR_WRITE 0x04 | |
3152 | #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE) | |
3153 | #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE) | |
3154 | #define MDIO_DATA_READ 0x02 | |
3155 | #define MDIO_ENB_IN 0x00 | |
3156 | ||
3157 | /* Generate the preamble that is required for initial synchronization | |
3158 | and by a few older transceivers. */ | |
3159 | static void mdio_sync(long ioaddr, int bits) | |
3160 | { | |
3161 | long mdio_addr = ioaddr + Wn4_PhysicalMgmt; | |
3162 | ||
3163 | /* Establish sync by sending at least 32 logic ones. */ | |
3164 | while (-- bits >= 0) { | |
3165 | outw(MDIO_DATA_WRITE1, mdio_addr); | |
3166 | mdio_delay(); | |
3167 | outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); | |
3168 | mdio_delay(); | |
3169 | } | |
3170 | } | |
3171 | ||
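/* For reference (clause-22 MDIO background, not specific to this driver),
   a management frame as clocked out below looks like

       <32 preamble ones> 01 10 PPPPP RRRRR TA DDDDDDDDDDDDDDDD   (read)
       <32 preamble ones> 01 01 PPPPP RRRRR 10 DDDDDDDDDDDDDDDD   (write)

   i.e. start bits, opcode, 5-bit PHY address, 5-bit register address,
   turnaround, then 16 data bits.  mdio_sync() supplies the preamble; the
   read_cmd/write_cmd values below encode the start, opcode and address
   fields and are shifted out MSB first. */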
3172 | static int mdio_read(struct net_device *dev, int phy_id, int location) | |
3173 | { | |
3174 | int i; | |
3175 | long ioaddr = dev->base_addr; | |
3176 | int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; | |
3177 | unsigned int retval = 0; | |
3178 | long mdio_addr = ioaddr + Wn4_PhysicalMgmt; | |
3179 | ||
3180 | if (mii_preamble_required) | |
3181 | mdio_sync(ioaddr, 32); | |
3182 | ||
3183 | /* Shift the read command bits out. */ | |
3184 | for (i = 14; i >= 0; i--) { | |
3185 | int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; | |
3186 | outw(dataval, mdio_addr); | |
3187 | mdio_delay(); | |
3188 | outw(dataval | MDIO_SHIFT_CLK, mdio_addr); | |
3189 | mdio_delay(); | |
3190 | } | |
3191 | /* Read the two turnaround bits, the 16 data bits, and a final idle bit. */ | |
3192 | for (i = 19; i > 0; i--) { | |
3193 | outw(MDIO_ENB_IN, mdio_addr); | |
3194 | mdio_delay(); | |
3195 | retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0); | |
3196 | outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); | |
3197 | mdio_delay(); | |
3198 | } | |
3199 | return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff; | |
3200 | } | |
3201 | ||
3202 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value) | |
3203 | { | |
3204 | long ioaddr = dev->base_addr; | |
3205 | int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; | |
3206 | long mdio_addr = ioaddr + Wn4_PhysicalMgmt; | |
3207 | int i; | |
3208 | ||
3209 | if (mii_preamble_required) | |
3210 | mdio_sync(ioaddr, 32); | |
3211 | ||
3212 | /* Shift the command bits out. */ | |
3213 | for (i = 31; i >= 0; i--) { | |
3214 | int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; | |
3215 | outw(dataval, mdio_addr); | |
3216 | mdio_delay(); | |
3217 | outw(dataval | MDIO_SHIFT_CLK, mdio_addr); | |
3218 | mdio_delay(); | |
3219 | } | |
3220 | /* Leave the interface idle. */ | |
3221 | for (i = 1; i >= 0; i--) { | |
3222 | outw(MDIO_ENB_IN, mdio_addr); | |
3223 | mdio_delay(); | |
3224 | outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); | |
3225 | mdio_delay(); | |
3226 | } | |
3227 | return; | |
3228 | } | |
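/* Illustrative use of the two helpers above (not part of the driver; the
   real code goes through the generic mii library, and callers must have
   selected register window 4 and hold vp->lock, as the ethtool methods
   above do).  Restarting autonegotiation by hand would look like

	int bmcr = mdio_read(dev, phy_id, MII_BMCR);
	mdio_write(dev, phy_id, MII_BMCR, bmcr | BMCR_ANENABLE | BMCR_ANRESTART);

   where phy_id is whatever PHY address the probe code found, and MII_BMCR,
   BMCR_ANENABLE and BMCR_ANRESTART come from <linux/mii.h>. */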
3229 | \f | |
3230 | /* ACPI: Advanced Configuration and Power Interface. */ | |
3231 | /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ | |
3232 | static void acpi_set_WOL(struct net_device *dev) | |
3233 | { | |
3234 | struct vortex_private *vp = netdev_priv(dev); | |
3235 | long ioaddr = dev->base_addr; | |
3236 | ||
3237 | if (vp->enable_wol) { | |
3238 | /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */ | |
3239 | EL3WINDOW(7); | |
3240 | outw(2, ioaddr + 0x0c); | |
3241 | /* The RxFilter must accept the WOL frames. */ | |
3242 | outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); | |
3243 | outw(RxEnable, ioaddr + EL3_CMD); | |
3244 | ||
3245 | pci_enable_wake(VORTEX_PCI(vp), 0, 1); | |
3246 | } | |
3247 | /* Change the power state to D3; RxEnable doesn't take effect. */ | |
3248 | pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); | |
3249 | } | |
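/* Note: the WOL filter above is only armed when vp->enable_wol is set.
   This version of the driver has no ethtool get_wol/set_wol hooks (see
   vortex_ethtool_ops above), so that flag presumably comes from the
   enable_wol option given at module load time, e.g.
   "modprobe 3c59x enable_wol=1" (parameter name assumed from earlier in
   the file). */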
3250 | ||
3251 | ||
3252 | static void __devexit vortex_remove_one (struct pci_dev *pdev) | |
3253 | { | |
3254 | struct net_device *dev = pci_get_drvdata(pdev); | |
3255 | struct vortex_private *vp; | |
3256 | ||
3257 | if (!dev) { | |
3258 | printk("vortex_remove_one called for Compaq device!\n"); | |
3259 | BUG(); | |
3260 | } | |
3261 | ||
3262 | vp = netdev_priv(dev); | |
3263 | ||
3264 | /* AKPM: FIXME: we should have | |
3265 | * if (vp->cb_fn_base) iounmap(vp->cb_fn_base); | |
3266 | * here | |
3267 | */ | |
3268 | unregister_netdev(dev); | |
3269 | ||
3270 | if (VORTEX_PCI(vp)) { | |
3271 | pci_set_power_state(VORTEX_PCI(vp), PCI_D0); /* Go active */ | |
3272 | if (vp->pm_state_valid) | |
3273 | pci_restore_state(VORTEX_PCI(vp)); | |
3274 | pci_disable_device(VORTEX_PCI(vp)); | |
3275 | } | |
3276 | /* Should really use issue_and_wait() here */ | |
3277 | outw(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14), | |
3278 | dev->base_addr + EL3_CMD); | |
3279 | ||
3280 | pci_free_consistent(pdev, | |
3281 | sizeof(struct boom_rx_desc) * RX_RING_SIZE | |
3282 | + sizeof(struct boom_tx_desc) * TX_RING_SIZE, | |
3283 | vp->rx_ring, | |
3284 | vp->rx_ring_dma); | |
3285 | if (vp->must_free_region) | |
3286 | release_region(dev->base_addr, vp->io_size); | |
3287 | free_netdev(dev); | |
3288 | } | |
3289 | ||
3290 | ||
3291 | static struct pci_driver vortex_driver = { | |
3292 | .name = "3c59x", | |
3293 | .probe = vortex_init_one, | |
3294 | .remove = __devexit_p(vortex_remove_one), | |
3295 | .id_table = vortex_pci_tbl, | |
3296 | #ifdef CONFIG_PM | |
3297 | .suspend = vortex_suspend, | |
3298 | .resume = vortex_resume, | |
3299 | #endif | |
3300 | }; | |
3301 | ||
3302 | ||
3303 | static int vortex_have_pci; | |
3304 | static int vortex_have_eisa; | |
3305 | ||
3306 | ||
3307 | static int __init vortex_init (void) | |
3308 | { | |
3309 | int pci_rc, eisa_rc; | |
3310 | ||
3311 | pci_rc = pci_module_init(&vortex_driver); | |
3312 | eisa_rc = vortex_eisa_init(); | |
3313 | ||
3314 | if (pci_rc == 0) | |
3315 | vortex_have_pci = 1; | |
3316 | if (eisa_rc > 0) | |
3317 | vortex_have_eisa = 1; | |
3318 | ||
3319 | return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV; | |
3320 | } | |
3321 | ||
3322 | ||
3323 | static void __exit vortex_eisa_cleanup (void) | |
3324 | { | |
3325 | struct vortex_private *vp; | |
3326 | long ioaddr; | |
3327 | ||
3328 | #ifdef CONFIG_EISA | |
3329 | /* Take care of the EISA devices */ | |
3330 | eisa_driver_unregister (&vortex_eisa_driver); | |
3331 | #endif | |
3332 | ||
3333 | if (compaq_net_device) { | |
3334 | vp = netdev_priv(compaq_net_device); | |
3335 | ioaddr = compaq_net_device->base_addr; | |
3336 | ||
3337 | unregister_netdev (compaq_net_device); | |
3338 | outw (TotalReset, ioaddr + EL3_CMD); | |
3339 | release_region (ioaddr, VORTEX_TOTAL_SIZE); | |
3340 | ||
3341 | free_netdev (compaq_net_device); | |
3342 | } | |
3343 | } | |
3344 | ||
3345 | ||
3346 | static void __exit vortex_cleanup (void) | |
3347 | { | |
3348 | if (vortex_have_pci) | |
3349 | pci_unregister_driver (&vortex_driver); | |
3350 | if (vortex_have_eisa) | |
3351 | vortex_eisa_cleanup (); | |
3352 | } | |
3353 | ||
3354 | ||
3355 | module_init(vortex_init); | |
3356 | module_exit(vortex_cleanup); | |
3357 | ||
3358 | \f | |
3359 | /* | |
3360 | * Local variables: | |
3361 | * c-indent-level: 4 | |
3362 | * c-basic-offset: 4 | |
3363 | * tab-width: 4 | |
3364 | * End: | |
3365 | */ |