1 | /* 8390.c: A general NS8390 ethernet driver core for linux. */ |
2 | /* | |
3 | Written 1992-94 by Donald Becker. | |
4 | ||
5 | Copyright 1993 United States Government as represented by the | |
6 | Director, National Security Agency. | |
7 | ||
8 | This software may be used and distributed according to the terms | |
9 | of the GNU General Public License, incorporated herein by reference. | |
10 | ||
11 | The author may be reached as becker@scyld.com, or C/O |
12 | Scyld Computing Corporation | |
13 | 410 Severn Ave., Suite 210 | |
14 | Annapolis MD 21403 | |
15 | ||
16 | ||
17 | This is the chip-specific code for many 8390-based ethernet adaptors. | |
18 | This is not a complete driver; it must be combined with board-specific |
19 | code such as ne.c, wd.c, 3c503.c, etc. | |
20 | ||
21 | Seeing how at least eight drivers use this code (not counting the |
22 | PCMCIA ones), it is easy to break some card by what seems like |
23 | a simple innocent change. Please contact me or Donald if you think | |
24 | you have found something that needs changing. -- PG | |
25 | ||
26 | ||
27 | Changelog: | |
28 | ||
29 | Paul Gortmaker : remove set_bit lock, other cleanups. | |
30 | Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to | |
31 | ei_block_input() for eth_io_copy_and_sum(). | |
32 | Paul Gortmaker : exchange static int ei_pingpong for a #define, | |
33 | also add better Tx error handling. | |
34 | Paul Gortmaker : rewrite Rx overrun handling as per NS specs. | |
35 | Alexey Kuznetsov : use the 8390's six bit hash multicast filter. | |
36 | Paul Gortmaker : tweak ANK's above multicast changes a bit. | |
37 | Paul Gortmaker : update packet statistics for v2.1.x | |
38 | Alan Cox : support arbitrary stupid port mappings on the |
39 | 68K Macintosh. Support >16bit I/O spaces | |
40 | Paul Gortmaker : add kmod support for auto-loading of the 8390 | |
41 | module by all drivers that require it. | |
42 | Alan Cox : Spinlocking work, added 'BUG_83C690' | |
43 | Paul Gortmaker : Separate out Tx timeout code from Tx path. | |
44 | Paul Gortmaker : Remove old unused single Tx buffer code. | |
45 | Hayato Fujiwara : Add m32r support. | |
46 | Paul Gortmaker : use skb_padto() instead of stack scratch area | |
47 | ||
48 | Sources: | |
49 | The National Semiconductor LAN Databook, and the 3Com 3c503 databook. | |
50 | ||
51 | */ | |
52 | ||
53 | #include <linux/module.h> | |
54 | #include <linux/kernel.h> | |
55 | #include <linux/jiffies.h> | |
56 | #include <linux/fs.h> | |
57 | #include <linux/types.h> | |
58 | #include <linux/string.h> | |
59 | #include <linux/bitops.h> | |
60 | #include <asm/system.h> | |
61 | #include <asm/uaccess.h> | |
62 | #include <asm/io.h> | |
63 | #include <asm/irq.h> | |
64 | #include <linux/delay.h> | |
65 | #include <linux/errno.h> | |
66 | #include <linux/fcntl.h> | |
67 | #include <linux/in.h> | |
68 | #include <linux/interrupt.h> | |
69 | #include <linux/init.h> | |
70 | #include <linux/crc32.h> | |
71 | ||
72 | #include <linux/netdevice.h> | |
73 | #include <linux/etherdevice.h> | |
74 | ||
75 | #define NS8390_CORE | |
76 | #include "8390.h" | |
77 | ||
78 | #define BUG_83C690 | |
79 | ||
80 | /* These are the operational function interfaces to board-specific | |
81 | routines. | |
82 | void reset_8390(struct net_device *dev) | |
83 | Resets the board associated with DEV, including a hardware reset of | |
84 | the 8390. This is only called when there is a transmit timeout, and | |
85 | it is always followed by 8390_init(). | |
86 | void block_output(struct net_device *dev, int count, const unsigned char *buf, | |
87 | int start_page) | |
88 | Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The | |
89 | "page" value uses the 8390's 256-byte pages. | |
90 | void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) | |
91 | Read the 4 byte, page aligned 8390 header. *If* there is a | |
92 | subsequent read, it will be of the rest of the packet. | |
93 | void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) | |
94 | Read COUNT bytes from the packet buffer into the skb data area. Start | |
95 | reading from RING_OFFSET, the address as the 8390 sees it. This will always | |
96 | follow the read of the 8390 header. | |
97 | */ | |
98 | #define ei_reset_8390 (ei_local->reset_8390) | |
99 | #define ei_block_output (ei_local->block_output) | |
100 | #define ei_block_input (ei_local->block_input) | |
101 | #define ei_get_8390_hdr (ei_local->get_8390_hdr) | |
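/*
 * Illustrative only: a minimal sketch of how a board driver might provide
 * these hooks for a hypothetical programmed-I/O card. The MYCARD_DATAPORT
 * offset and the mycard_* names are assumptions made up for this example;
 * see ne.c or wd.c for real, working implementations. The other three hooks
 * (reset_8390, block_input, block_output) follow the same remote-DMA pattern.
 */
#if 0 /* example only, not built */
static void mycard_get_8390_hdr(struct net_device *dev,
				struct e8390_pkt_hdr *hdr, int ring_page)
{
	unsigned long nic_base = dev->base_addr;

	/* Point the remote DMA at byte 0 of the 256-byte ring page... */
	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + E8390_CMD);
	ei_outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
	ei_outb_p(0, nic_base + EN0_RCNTHI);
	ei_outb_p(0, nic_base + EN0_RSARLO);
	ei_outb_p(ring_page, nic_base + EN0_RSARHI);
	ei_outb_p(E8390_RREAD + E8390_START, nic_base + E8390_CMD);

	/* ...then pull the 4 byte header in through the card's data port. */
	insw(nic_base + MYCARD_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr) >> 1);
}

static void mycard_setup_hooks(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	ei_local->reset_8390   = mycard_reset_8390;	/* written in the same style */
	ei_local->get_8390_hdr = mycard_get_8390_hdr;
	ei_local->block_input  = mycard_block_input;
	ei_local->block_output = mycard_block_output;
}
#endif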
102 | ||
103 | /* use 0 for production, 1 for verification, >2 for debug */ | |
104 | #ifndef ei_debug | |
105 | int ei_debug = 1; | |
106 | #endif | |
107 | ||
108 | /* Index to functions. */ | |
109 | static void ei_tx_intr(struct net_device *dev); | |
110 | static void ei_tx_err(struct net_device *dev); | |
111 | void ei_tx_timeout(struct net_device *dev); |
112 | static void ei_receive(struct net_device *dev); |
113 | static void ei_rx_overrun(struct net_device *dev); | |
114 | ||
115 | /* Routines generic to NS8390-based boards. */ | |
116 | static void NS8390_trigger_send(struct net_device *dev, unsigned int length, | |
117 | int start_page); | |
118 | static void do_set_multicast_list(struct net_device *dev); |
119 | static void __NS8390_init(struct net_device *dev, int startp); | |
120 | ||
121 | /* |
122 | * SMP and the 8390 setup. |
123 | * |
124 | * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is |
125 | * a page register that controls bank and packet buffer access. We guard |
126 | * this with ei_local->page_lock. Nobody should assume or set the page other |
127 | * than zero when the lock is not held. Lock holders must restore page 0 |
128 | * before unlocking. Even pure readers must take the lock so they are |
129 | * protected while the chip is in page 0. |
130 | * |
131 | * To make life difficult the chip can also be very slow. We therefore can't |
132 | * just use spinlocks. For the longer lockups we disable the irq the device |
133 | * sits on and hold the lock. We must hold the lock because there is a dual |
134 | * processor case other than interrupts (get stats/set multicast list in |
135 | * parallel with each other and transmit). |
136 | * |
137 | * Note: in theory we can just disable the irq on the card _but_ there is |
138 | * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs" |
139 | * enter lock, take the queued irq. So we waddle instead of flying. |
140 | * |
141 | * Finally, by special arrangement for the purpose of being generally |
142 | * annoying, the transmit function is called bh atomic. That places |
143 | * restrictions on the user context callers as disable_irq won't save |
144 | * them. |
145 | * |
146 | * Additional explanation of problems with locking by Alan Cox: |
147 | * |
148 | * "The author (me) didn't use spin_lock_irqsave because the slowness of the |
149 | * card means that approach caused horrible problems like losing serial data |
150 | * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA |
151 | * chips with FPGA front ends. |
152 | * |
153 | * Ok the logic behind the 8390 is very simple: |
154 | * |
155 | * Things to know |
156 | * - IRQ delivery is asynchronous to the PCI bus |
157 | * - Blocking the local CPU IRQ via spin locks was too slow |
158 | * - The chip has register windows needing locking work |
159 | * |
160 | * So the path was once (I say once as people appear to have changed it |
161 | * in the mean time and it now looks rather bogus if the changes to use |
162 | * disable_irq_nosync_irqsave are disabling the local IRQ) |
163 | * |
164 | * |
165 | * Take the page lock |
166 | * Mask the IRQ on chip |
167 | * Disable the IRQ (but not mask locally - someone seems to have |
168 | * broken this with the lock validator stuff) |
169 | * [This must be _nosync as the page lock may otherwise |
170 | * deadlock us] |
171 | * Drop the page lock and turn IRQs back on |
172 | * |
173 | * At this point an existing IRQ may still be running but we can't |
174 | * get a new one |
175 | * |
176 | * Take the lock (so we know the IRQ has terminated) but don't mask |
177 | * the IRQs on the processor |
178 | * Set irqlock [for debug] |
179 | * |
180 | * Transmit (slow as ****) |
181 | * |
182 | * re-enable the IRQ |
183 | * |
184 | * |
185 | * We have to use disable_irq because otherwise you will get delayed |
186 | * interrupts on the APIC bus deadlocking the transmit path. |
187 | * |
188 | * Quite hairy but the chip simply wasn't designed for SMP and you can't |
189 | * even ACK an interrupt without risking corrupting other parallel |
190 | * activities on the chip." [lkml, 25 Jul 2007] |
191 | */ |
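/*
 * A condensed sketch of the transmit-path locking sequence described above,
 * using the page_lock/irqlock fields and helpers that appear later in this
 * file. Illustrative only; the real thing is ei_start_xmit() below, which
 * also uploads the frame and juggles the two Tx buffers between the phases.
 */
#if 0 /* example only, not built */
static void example_tx_locking(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);
	unsigned long flags;

	/* Phase 1: short critical section, mask the chip's own interrupt. */
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, dev->base_addr + EN0_IMR);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	/* Phase 2: stop new IRQ delivery, then retake the lock so any
	   handler already running on another CPU has finished. */
	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
	spin_lock(&ei_local->page_lock);
	ei_local->irqlock = 1;

	/* ... the slow part: upload the packet and trigger the send ... */

	/* Phase 3: unmask the chip, drop the lock, re-enable the IRQ line. */
	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, dev->base_addr + EN0_IMR);
	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
}
#endif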
192 | ||
193 | ||
194 | ||
195 | /** | |
196 | * ei_open - Open/initialize the board. | |
197 | * @dev: network device to initialize | |
198 | * | |
199 | * This routine goes all-out, setting everything | |
200 | * up anew at each open, even though many of these registers should only |
201 | * need to be set once at boot. |
202 | */ | |
203 | static int __ei_open(struct net_device *dev) | |
204 | { | |
205 | unsigned long flags; | |
206 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
207 | ||
208 | if (dev->watchdog_timeo <= 0) |
209 | dev->watchdog_timeo = TX_TIMEOUT; | |
210 | ||
211 | /* | |
212 | * Grab the page lock so we own the register set, then call | |
213 | * the init function. | |
214 | */ | |
215 | ||
216 | spin_lock_irqsave(&ei_local->page_lock, flags); | |
217 | __NS8390_init(dev, 1); | |
218 | /* Set the flag before we drop the lock. That way the IRQ arrives |
219 | after it's set and we get no silly warnings */ |
220 | netif_start_queue(dev); | |
221 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | |
222 | ei_local->irqlock = 0; | |
223 | return 0; | |
224 | } | |
225 | ||
226 | /** | |
227 | * ei_close - shut down network device | |
228 | * @dev: network device to close | |
229 | * | |
230 | * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done. | |
231 | */ | |
232 | static int __ei_close(struct net_device *dev) | |
233 | { | |
234 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
235 | unsigned long flags; | |
236 | ||
237 | /* | |
238 | * Hold the page lock during close | |
239 | */ | |
240 | ||
241 | spin_lock_irqsave(&ei_local->page_lock, flags); | |
242 | __NS8390_init(dev, 0); | |
243 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | |
244 | netif_stop_queue(dev); | |
245 | return 0; | |
246 | } | |
247 | ||
248 | /** | |
249 | * ei_tx_timeout - handle transmit time out condition | |
250 | * @dev: network device which has apparently fallen asleep | |
251 | * | |
252 | * Called by kernel when device never acknowledges a transmit has | |
253 | * completed (or failed) - i.e. never posted a Tx related interrupt. | |
254 | */ | |
255 | ||
256 | static void __ei_tx_timeout(struct net_device *dev) |
257 | { |
258 | unsigned long e8390_base = dev->base_addr; | |
259 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
260 | int txsr, isr, tickssofar = jiffies - dev->trans_start; | |
261 | unsigned long flags; | |
262 | ||
263 | dev->stats.tx_errors++; |
264 | |
265 | spin_lock_irqsave(&ei_local->page_lock, flags); | |
266 | txsr = ei_inb(e8390_base+EN0_TSR); | |
267 | isr = ei_inb(e8390_base+EN0_ISR); | |
268 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | |
269 | ||
270 | printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n", | |
271 | dev->name, (txsr & ENTSR_ABT) ? "excess collisions." : | |
272 | (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar); | |
273 | ||
274 | if (!isr && !dev->stats.tx_packets) |
275 | { |
276 | /* The 8390 probably hasn't gotten on the cable yet. */ | |
277 | ei_local->interface_num ^= 1; /* Try a different xcvr. */ | |
278 | } | |
279 | ||
280 | /* Ugly but a reset can be slow, yet must be protected */ | |
281 | ||
282 | disable_irq_nosync_lockdep(dev->irq); | |
283 | spin_lock(&ei_local->page_lock); | |
284 | ||
285 | /* Try to restart the card. Perhaps the user has fixed something. */ | |
286 | ei_reset_8390(dev); | |
287 | __NS8390_init(dev, 1); | |
288 | ||
289 | spin_unlock(&ei_local->page_lock); | |
290 | enable_irq_lockdep(dev->irq); | |
291 | netif_wake_queue(dev); | |
292 | } | |
293 | ||
294 | /** | |
295 | * ei_start_xmit - begin packet transmission | |
296 | * @skb: packet to be sent | |
297 | * @dev: network device to which packet is sent | |
298 | * | |
299 | * Sends a packet to an 8390 network device. | |
300 | */ | |
301 | ||
302 | static netdev_tx_t __ei_start_xmit(struct sk_buff *skb, |
303 | struct net_device *dev) | |
304 | { |
305 | unsigned long e8390_base = dev->base_addr; | |
306 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
307 | int send_length = skb->len, output_page; | |
308 | unsigned long flags; | |
309 | char buf[ETH_ZLEN]; | |
310 | char *data = skb->data; | |
311 | ||
312 | if (skb->len < ETH_ZLEN) { | |
313 | memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */ | |
314 | memcpy(buf, data, skb->len); | |
315 | send_length = ETH_ZLEN; | |
316 | data = buf; | |
317 | } | |
318 | ||
319 | /* Mask interrupts from the ethercard. | |
320 | SMP: We have to grab the lock here, otherwise the IRQ handler |
321 | on another CPU can flip the register window and race the IRQ mask set. |
322 | We end up trashing the mcast filter, not disabling irqs, if we don't lock */ |
323 | ||
324 | spin_lock_irqsave(&ei_local->page_lock, flags); | |
325 | ei_outb_p(0x00, e8390_base + EN0_IMR); | |
326 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | |
327 | ||
328 | ||
329 | /* | |
330 | * Slow phase with lock held. | |
331 | */ | |
332 | ||
333 | disable_irq_nosync_lockdep_irqsave(dev->irq, &flags); | |
334 | ||
335 | spin_lock(&ei_local->page_lock); | |
336 | ||
337 | ei_local->irqlock = 1; | |
338 | ||
339 | /* | |
340 | * We have two Tx slots available for use. Find the first free | |
341 | * slot, and then perform some sanity checks. With two Tx bufs, | |
342 | * you get very close to transmitting back-to-back packets. With | |
343 | * only one Tx buf, the transmitter sits idle while you reload the | |
344 | * card, leaving a substantial gap between each transmitted packet. | |
345 | */ | |
346 | ||
347 | if (ei_local->tx1 == 0) | |
348 | { | |
349 | output_page = ei_local->tx_start_page; | |
350 | ei_local->tx1 = send_length; | |
351 | if (ei_debug && ei_local->tx2 > 0) | |
352 | printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n", | |
353 | dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing); | |
354 | } | |
355 | else if (ei_local->tx2 == 0) | |
356 | { | |
357 | output_page = ei_local->tx_start_page + TX_PAGES/2; | |
358 | ei_local->tx2 = send_length; | |
359 | if (ei_debug && ei_local->tx1 > 0) | |
360 | printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n", | |
361 | dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing); | |
362 | } | |
363 | else | |
364 | { /* We should never get here. */ | |
365 | if (ei_debug) | |
366 | printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n", | |
367 | dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx); | |
368 | ei_local->irqlock = 0; | |
369 | netif_stop_queue(dev); | |
370 | ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); | |
371 | spin_unlock(&ei_local->page_lock); | |
372 | enable_irq_lockdep_irqrestore(dev->irq, &flags); | |
373 | dev->stats.tx_errors++; |
374 | return NETDEV_TX_BUSY; |
375 | } |
376 | ||
377 | /* | |
378 | * Okay, now upload the packet and trigger a send if the transmitter | |
379 | * isn't already sending. If it is busy, the interrupt handler will | |
380 | * trigger the send later, upon receiving a Tx done interrupt. | |
381 | */ | |
382 | ||
383 | ei_block_output(dev, send_length, data, output_page); | |
384 | ||
385 | if (! ei_local->txing) | |
386 | { | |
387 | ei_local->txing = 1; | |
388 | NS8390_trigger_send(dev, send_length, output_page); | |
389 | dev->trans_start = jiffies; | |
390 | if (output_page == ei_local->tx_start_page) | |
391 | { | |
392 | ei_local->tx1 = -1; | |
393 | ei_local->lasttx = -1; | |
394 | } | |
395 | else | |
396 | { | |
397 | ei_local->tx2 = -1; | |
398 | ei_local->lasttx = -2; | |
399 | } | |
400 | } | |
401 | else ei_local->txqueue++; | |
402 | ||
403 | if (ei_local->tx1 && ei_local->tx2) | |
404 | netif_stop_queue(dev); | |
405 | else | |
406 | netif_start_queue(dev); | |
407 | ||
408 | /* Turn 8390 interrupts back on. */ | |
409 | ei_local->irqlock = 0; | |
410 | ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); | |
411 | ||
412 | spin_unlock(&ei_local->page_lock); | |
413 | enable_irq_lockdep_irqrestore(dev->irq, &flags); | |
414 | ||
415 | dev_kfree_skb (skb); | |
416 | dev->stats.tx_bytes += send_length; |
417 | ||
418 | return NETDEV_TX_OK; |
419 | } |
420 | ||
421 | /** | |
422 | * ei_interrupt - handle the interrupts from an 8390 | |
423 | * @irq: interrupt number | |
424 | * @dev_id: a pointer to the net_device | |
425 | * | |
426 | * Handle the ether interface interrupts. We pull packets from | |
427 | * the 8390 via the card specific functions and fire them at the networking |
428 | * stack. We also handle transmit completions and wake the transmit path if |
429 | * necessary. We also update the counters and do other housekeeping as |
430 | * needed. |
431 | */ | |
432 | ||
433 | static irqreturn_t __ei_interrupt(int irq, void *dev_id) | |
434 | { | |
435 | struct net_device *dev = dev_id; | |
436 | unsigned long e8390_base = dev->base_addr; | |
437 | int interrupts, nr_serviced = 0; | |
438 | struct ei_device *ei_local = netdev_priv(dev); | |
439 | ||
440 | /* | |
441 | * Protect the irq test too. | |
442 | */ | |
443 | ||
444 | spin_lock(&ei_local->page_lock); | |
445 | ||
446 | if (ei_local->irqlock) | |
447 | { | |
448 | #if 1 /* This might just be an interrupt for a PCI device sharing this line */ | |
449 | /* The "irqlock" check is only for testing. */ | |
450 | printk(ei_local->irqlock | |
451 | ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n" | |
452 | : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n", | |
453 | dev->name, ei_inb_p(e8390_base + EN0_ISR), | |
454 | ei_inb_p(e8390_base + EN0_IMR)); | |
455 | #endif | |
456 | spin_unlock(&ei_local->page_lock); | |
457 | return IRQ_NONE; | |
458 | } | |
459 | ||
460 | /* Change to page 0 and read the intr status reg. */ | |
461 | ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); | |
462 | if (ei_debug > 3) | |
463 | printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name, | |
464 | ei_inb_p(e8390_base + EN0_ISR)); | |
465 | ||
466 | /* !!Assumption!! -- we stay in page 0. Don't break this. */ | |
467 | while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 && |
468 | ++nr_serviced < MAX_SERVICE) | |
469 | { |
470 | if (!netif_running(dev)) { | |
471 | printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name); | |
472 | /* rmk - acknowledge the interrupts */ | |
473 | ei_outb_p(interrupts, e8390_base + EN0_ISR); | |
474 | interrupts = 0; | |
475 | break; | |
476 | } | |
477 | if (interrupts & ENISR_OVER) | |
478 | ei_rx_overrun(dev); | |
479 | else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) | |
480 | { | |
481 | /* Got a good (?) packet. */ | |
482 | ei_receive(dev); | |
483 | } | |
484 | /* Push the next to-transmit packet through. */ | |
485 | if (interrupts & ENISR_TX) | |
486 | ei_tx_intr(dev); | |
487 | else if (interrupts & ENISR_TX_ERR) | |
488 | ei_tx_err(dev); | |
489 | ||
490 | if (interrupts & ENISR_COUNTERS) | |
491 | { | |
492 | dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0); |
493 | dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1); | |
494 | dev->stats.rx_missed_errors+= ei_inb_p(e8390_base + EN0_COUNTER2); | |
495 | ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */ |
496 | } | |
497 | ||
498 | /* Ignore any RDC interrupts that make it back to here. */ | |
499 | if (interrupts & ENISR_RDC) | |
500 | { | |
501 | ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR); | |
502 | } | |
503 | ||
504 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); | |
505 | } | |
506 | ||
507 | if (interrupts && ei_debug) | |
508 | { | |
509 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); | |
510 | if (nr_serviced >= MAX_SERVICE) | |
511 | { | |
512 | /* 0xFF is valid for a card removal */ | |
513 | if(interrupts!=0xFF) | |
514 | printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n", | |
515 | dev->name, interrupts); | |
516 | ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ | |
517 | } else { | |
518 | printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts); | |
519 | ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ | |
520 | } | |
521 | } | |
522 | spin_unlock(&ei_local->page_lock); | |
523 | return IRQ_RETVAL(nr_serviced > 0); | |
524 | } | |
525 | ||
526 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
527 | static void __ei_poll(struct net_device *dev) | |
528 | { | |
529 | disable_irq(dev->irq); |
530 | __ei_interrupt(dev->irq, dev); |
531 | enable_irq(dev->irq); |
532 | } |
533 | #endif | |
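/*
 * Illustrative only: how a board driver typically attaches this handler at
 * open time, via the ei_interrupt()/ei_open() wrappers that 8390.c exports
 * around __ei_interrupt() and __ei_open(). IRQF_SHARED matters because the
 * line may be shared with other (e.g. PCI) devices, which is exactly the
 * case the irqlock test above tolerates. mycard_open is a made-up name.
 */
#if 0 /* example only, not built */
static int mycard_open(struct net_device *dev)
{
	int ret = request_irq(dev->irq, ei_interrupt, IRQF_SHARED,
			      dev->name, dev);
	if (ret)
		return ret;
	return ei_open(dev);
}
#endif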
534 | ||
535 | /** | |
536 | * ei_tx_err - handle transmitter error | |
537 | * @dev: network device which threw the exception | |
538 | * | |
539 | * A transmitter error has happened. Most likely excess collisions (which | |
540 | * is a fairly normal condition). If the error is one where the Tx will |
541 | * have been aborted, we try and send another one right away, instead of |
542 | * letting the failed packet sit and collect dust in the Tx buffer. This |
543 | * is a much better solution as it avoids kernel based Tx timeouts, and |
544 | * an unnecessary card reset. |
545 | * | |
546 | * Called with lock held. | |
547 | */ | |
548 | ||
549 | static void ei_tx_err(struct net_device *dev) | |
550 | { | |
551 | unsigned long e8390_base = dev->base_addr; | |
552 | /* ei_local is used on some platforms via the EI_SHIFT macro */ |
553 | struct ei_device *ei_local __maybe_unused = netdev_priv(dev); | |
554 | unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR); |
555 | unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); | |
556 | ||
557 | #ifdef VERBOSE_ERROR_DUMP | |
558 | printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr); | |
559 | if (txsr & ENTSR_ABT) | |
560 | printk("excess-collisions "); | |
561 | if (txsr & ENTSR_ND) | |
562 | printk("non-deferral "); | |
563 | if (txsr & ENTSR_CRS) | |
564 | printk("lost-carrier "); | |
565 | if (txsr & ENTSR_FU) | |
566 | printk("FIFO-underrun "); | |
567 | if (txsr & ENTSR_CDH) | |
568 | printk("lost-heartbeat "); | |
569 | printk("\n"); | |
570 | #endif | |
571 | ||
572 | ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */ | |
573 | ||
574 | if (tx_was_aborted) | |
575 | ei_tx_intr(dev); | |
576 | else | |
577 | { | |
578 | dev->stats.tx_errors++; |
579 | if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++; | |
580 | if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++; | |
581 | if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++; | |
582 | } |
583 | } | |
584 | ||
585 | /** | |
586 | * ei_tx_intr - transmit interrupt handler | |
587 | * @dev: network device for which tx intr is handled | |
588 | * | |
589 | * We have finished a transmit: check for errors and then trigger the next | |
590 | * packet to be sent. Called with lock held. |
591 | */ | |
592 | ||
593 | static void ei_tx_intr(struct net_device *dev) | |
594 | { | |
595 | unsigned long e8390_base = dev->base_addr; | |
596 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
597 | int status = ei_inb(e8390_base + EN0_TSR); | |
598 | ||
599 | ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ | |
600 | ||
601 | /* | |
602 | * There are two Tx buffers, see which one finished, and trigger | |
603 | * the send of another one if it exists. | |
604 | */ | |
605 | ei_local->txqueue--; | |
606 | ||
607 | if (ei_local->tx1 < 0) | |
608 | { | |
609 | if (ei_local->lasttx != 1 && ei_local->lasttx != -1) | |
610 | printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n", | |
611 | ei_local->name, ei_local->lasttx, ei_local->tx1); | |
612 | ei_local->tx1 = 0; | |
613 | if (ei_local->tx2 > 0) | |
614 | { | |
615 | ei_local->txing = 1; | |
616 | NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); | |
617 | dev->trans_start = jiffies; | |
618 | ei_local->tx2 = -1, | |
619 | ei_local->lasttx = 2; | |
620 | } | |
621 | else ei_local->lasttx = 20, ei_local->txing = 0; | |
622 | } | |
623 | else if (ei_local->tx2 < 0) | |
624 | { | |
625 | if (ei_local->lasttx != 2 && ei_local->lasttx != -2) | |
626 | printk("%s: bogus last_tx_buffer %d, tx2=%d.\n", | |
627 | ei_local->name, ei_local->lasttx, ei_local->tx2); | |
628 | ei_local->tx2 = 0; | |
629 | if (ei_local->tx1 > 0) | |
630 | { | |
631 | ei_local->txing = 1; | |
632 | NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); | |
633 | dev->trans_start = jiffies; | |
634 | ei_local->tx1 = -1; | |
635 | ei_local->lasttx = 1; | |
636 | } | |
637 | else | |
638 | ei_local->lasttx = 10, ei_local->txing = 0; | |
639 | } | |
640 | // else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n", | |
641 | // dev->name, ei_local->lasttx); | |
642 | ||
643 | /* Minimize Tx latency: update the statistics after we restart TXing. */ | |
644 | if (status & ENTSR_COL) | |
645 | dev->stats.collisions++; |
646 | if (status & ENTSR_PTX) |
647 | dev->stats.tx_packets++; |
648 | else |
649 | { | |
650 | dev->stats.tx_errors++; |
651 | if (status & ENTSR_ABT) |
652 | { | |
653 | dev->stats.tx_aborted_errors++; |
654 | dev->stats.collisions += 16; | |
655 | } |
656 | if (status & ENTSR_CRS) | |
657 | dev->stats.tx_carrier_errors++; |
658 | if (status & ENTSR_FU) |
659 | dev->stats.tx_fifo_errors++; |
660 | if (status & ENTSR_CDH) |
661 | dev->stats.tx_heartbeat_errors++; |
662 | if (status & ENTSR_OWC) |
663 | dev->stats.tx_window_errors++; |
664 | } |
665 | netif_wake_queue(dev); | |
666 | } | |
667 | ||
668 | /** | |
669 | * ei_receive - receive some packets | |
670 | * @dev: network device with which receive will be run | |
671 | * | |
672 | * We have a good packet(s), get it/them out of the buffers. | |
673 | * Called with lock held. |
674 | */ | |
675 | ||
676 | static void ei_receive(struct net_device *dev) | |
677 | { | |
678 | unsigned long e8390_base = dev->base_addr; | |
679 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
680 | unsigned char rxing_page, this_frame, next_frame; | |
681 | unsigned short current_offset; | |
682 | int rx_pkt_count = 0; | |
683 | struct e8390_pkt_hdr rx_frame; | |
684 | int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page; | |
685 | ||
686 | while (++rx_pkt_count < 10) | |
687 | { | |
688 | int pkt_len, pkt_stat; | |
689 | ||
690 | /* Get the rx page (incoming packet pointer). */ | |
691 | ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD); | |
692 | rxing_page = ei_inb_p(e8390_base + EN1_CURPAG); | |
693 | ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); | |
694 | ||
695 | /* Remove one frame from the ring. Boundary is always a page behind. */ | |
696 | this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1; | |
697 | if (this_frame >= ei_local->stop_page) | |
698 | this_frame = ei_local->rx_start_page; | |
699 | ||
700 | /* Someday we'll omit the previous, iff we never get this message. | |
701 | (There is at least one clone claimed to have a problem.) | |
702 | ||
703 | Keep quiet if it looks like a card removal. One problem here | |
704 | is that some clones crash in roughly the same way. | |
705 | */ | |
706 | if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF)) | |
707 | printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n", | |
708 | dev->name, this_frame, ei_local->current_page); | |
709 | ||
710 | if (this_frame == rxing_page) /* Read all the frames? */ | |
711 | break; /* Done for now */ | |
712 | ||
713 | current_offset = this_frame << 8; | |
714 | ei_get_8390_hdr(dev, &rx_frame, this_frame); | |
715 | ||
716 | pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); | |
717 | pkt_stat = rx_frame.status; | |
718 | ||
719 | next_frame = this_frame + 1 + ((pkt_len+4)>>8); | |
720 | ||
721 | /* Check for bogosity warned by 3c503 book: the status byte is never | |
722 | written. This happened a lot during testing! This code should be | |
723 | cleaned up someday. */ | |
724 | if (rx_frame.next != next_frame && |
725 | rx_frame.next != next_frame + 1 && | |
726 | rx_frame.next != next_frame - num_rx_pages && | |
727 | rx_frame.next != next_frame + 1 - num_rx_pages) { | |
728 | ei_local->current_page = rxing_page; |
729 | ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY); | |
730 | dev->stats.rx_errors++; |
731 | continue; |
732 | } | |
733 | ||
734 | if (pkt_len < 60 || pkt_len > 1518) | |
735 | { | |
736 | if (ei_debug) | |
737 | printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n", | |
738 | dev->name, rx_frame.count, rx_frame.status, | |
739 | rx_frame.next); | |
740 | dev->stats.rx_errors++; |
741 | dev->stats.rx_length_errors++; | |
742 | } |
743 | else if ((pkt_stat & 0x0F) == ENRSR_RXOK) | |
744 | { | |
745 | struct sk_buff *skb; | |
746 | ||
747 | skb = dev_alloc_skb(pkt_len+2); | |
748 | if (skb == NULL) | |
749 | { | |
750 | if (ei_debug > 1) | |
751 | printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n", | |
752 | dev->name, pkt_len); | |
753 | dev->stats.rx_dropped++; |
754 | break; |
755 | } | |
756 | else | |
757 | { | |
758 | skb_reserve(skb,2); /* IP headers on 16 byte boundaries */ | |
759 | skb_put(skb, pkt_len); /* Make room */ |
760 | ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); | |
761 | skb->protocol=eth_type_trans(skb,dev); | |
762 | netif_rx(skb); | |
763 | dev->stats.rx_packets++; |
764 | dev->stats.rx_bytes += pkt_len; | |
765 | if (pkt_stat & ENRSR_PHY) |
766 | dev->stats.multicast++; |
767 | } |
768 | } | |
769 | else | |
770 | { | |
771 | if (ei_debug) | |
772 | printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n", | |
773 | dev->name, rx_frame.status, rx_frame.next, | |
774 | rx_frame.count); | |
775 | dev->stats.rx_errors++; |
776 | /* NB: The NIC counts CRC, frame and missed errors. */ |
777 | if (pkt_stat & ENRSR_FO) | |
778 | dev->stats.rx_fifo_errors++; |
779 | } |
780 | next_frame = rx_frame.next; | |
781 | ||
782 | /* This _should_ never happen: it's here for avoiding bad clones. */ | |
783 | if (next_frame >= ei_local->stop_page) { | |
784 | printk("%s: next frame inconsistency, %#2x\n", dev->name, | |
785 | next_frame); | |
786 | next_frame = ei_local->rx_start_page; | |
787 | } | |
788 | ei_local->current_page = next_frame; | |
789 | ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); | |
790 | } | |
791 | ||
792 | /* We used to also ack ENISR_OVER here, but that would sometimes mask | |
793 | a real overrun, leaving the 8390 in a stopped state with rec'vr off. */ | |
794 | ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR); | |
795 | return; | |
796 | } | |
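/*
 * Worked example of the ring arithmetic used in ei_receive() above, with
 * assumed (but typical 16-bit card) values: rx_start_page = 0x4c,
 * stop_page = 0x80. Illustrative only.
 */
#if 0 /* example only, not built */
static void example_ring_math(void)
{
	unsigned char this_frame = 0x50;	/* frame begins in ring page 0x50 */
	int pkt_len = 300;			/* data bytes, after the 4 byte header */
	unsigned short current_offset = this_frame << 8;	/* 0x5000 */

	/* The skb data is read starting just past the header: 0x5004. */
	unsigned short data_offset = current_offset + sizeof(struct e8390_pkt_hdr);

	/* Header + data = 304 bytes, i.e. two 256 byte pages, so the next
	   frame starts two pages later. This matches the formula above:
	   0x50 + 1 + ((300 + 4) >> 8) = 0x52. */
	unsigned char next_frame = this_frame + 1 + ((pkt_len + 4) >> 8);

	/* The boundary register is kept one page behind the next frame. */
	unsigned char boundary = next_frame - 1;	/* 0x51 */

	(void)data_offset;
	(void)boundary;
}
#endif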
797 | ||
798 | /** | |
799 | * ei_rx_overrun - handle receiver overrun | |
800 | * @dev: network device which threw exception | |
801 | * | |
802 | * We have a receiver overrun: we have to kick the 8390 to get it started | |
803 | * again. Problem is that you have to kick it exactly as NS prescribes in |
804 | * the updated datasheets, or "the NIC may act in an unpredictable manner." |
805 | * This includes causing "the NIC to defer indefinitely when it is stopped |
806 | * on a busy network." Ugh. |
807 | * Called with lock held. Don't call this with the interrupts off or your |
808 | * computer will hate you - it takes 10ms or so. |
809 | */ | |
810 | ||
811 | static void ei_rx_overrun(struct net_device *dev) | |
812 | { | |
813 | unsigned long e8390_base = dev->base_addr; | |
814 | unsigned char was_txing, must_resend = 0; | |
815 | /* ei_local is used on some platforms via the EI_SHIFT macro */ |
816 | struct ei_device *ei_local __maybe_unused = netdev_priv(dev); | |
817 | |
818 | /* | |
819 | * Record whether a Tx was in progress and then issue the | |
820 | * stop command. | |
821 | */ | |
822 | was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS; | |
823 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); | |
824 | ||
825 | if (ei_debug > 1) | |
826 | printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name); | |
827 | dev->stats.rx_over_errors++; |
828 | |
829 | /* | |
830 | * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. | |
831 | * Early datasheets said to poll the reset bit, but now they say that | |
832 | * it "is not a reliable indicator and subsequently should be ignored." | |
833 | * We wait at least 10ms. | |
834 | */ | |
835 | ||
836 | mdelay(10); | |
837 | ||
838 | /* | |
839 | * Reset RBCR[01] back to zero as per magic incantation. | |
840 | */ | |
841 | ei_outb_p(0x00, e8390_base+EN0_RCNTLO); | |
842 | ei_outb_p(0x00, e8390_base+EN0_RCNTHI); | |
843 | ||
844 | /* | |
845 | * See if any Tx was interrupted or not. According to NS, this | |
846 | * step is vital, and skipping it will cause no end of havoc. | |
847 | */ | |
848 | ||
849 | if (was_txing) | |
850 | { | |
851 | unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); | |
852 | if (!tx_completed) | |
853 | must_resend = 1; | |
854 | } | |
855 | ||
856 | /* | |
857 | * Have to enter loopback mode and then restart the NIC before | |
858 | * you are allowed to slurp packets up off the ring. | |
859 | */ | |
860 | ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); | |
861 | ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); | |
862 | ||
863 | /* | |
864 | * Clear the Rx ring of all the debris, and ack the interrupt. | |
865 | */ | |
866 | ei_receive(dev); | |
867 | ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR); | |
868 | ||
869 | /* | |
870 | * Leave loopback mode, and resend any packet that got stopped. | |
871 | */ | |
872 | ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); | |
873 | if (must_resend) | |
874 | ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); | |
875 | } | |
876 | ||
877 | /* | |
878 | * Collect the stats. This is called unlocked and from several contexts. | |
879 | */ | |
880 | ||
881 | static struct net_device_stats *__ei_get_stats(struct net_device *dev) |
882 | { |
883 | unsigned long ioaddr = dev->base_addr; | |
884 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
885 | unsigned long flags; | |
886 | ||
887 | /* If the card is stopped, just return the present stats. */ | |
888 | if (!netif_running(dev)) | |
889 | return &dev->stats; |
890 | |
891 | spin_lock_irqsave(&ei_local->page_lock,flags); | |
892 | /* Read the counter registers, assuming we are in page 0. */ | |
893 | dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0); |
894 | dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1); | |
895 | dev->stats.rx_missed_errors+= ei_inb_p(ioaddr + EN0_COUNTER2); | |
896 | spin_unlock_irqrestore(&ei_local->page_lock, flags); |
897 | ||
898 | return &dev->stats; |
899 | } |
900 | ||
901 | /* | |
902 | * Form the 64 bit 8390 multicast table from the linked list of addresses | |
903 | * associated with this dev structure. | |
904 | */ | |
905 | ||
906 | static inline void make_mc_bits(u8 *bits, struct net_device *dev) | |
907 | { | |
908 | struct dev_mc_list *dmi; | |
909 | ||
910 | for (dmi=dev->mc_list; dmi; dmi=dmi->next) | |
911 | { | |
912 | u32 crc; | |
913 | if (dmi->dmi_addrlen != ETH_ALEN) | |
914 | { | |
915 | printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name); | |
916 | continue; | |
917 | } | |
918 | crc = ether_crc(ETH_ALEN, dmi->dmi_addr); | |
919 | /* | |
920 | * The 8390 uses the 6 most significant bits of the | |
921 | * CRC to index the multicast table. | |
922 | */ | |
923 | bits[crc>>29] |= (1<<((crc>>26)&7)); | |
924 | } | |
925 | } | |
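/*
 * Illustrative sketch of how one address lands in the 64 bit hash table
 * built by make_mc_bits() above: the top 3 bits of the Ethernet CRC select
 * one of the 8 filter bytes (written to EN1_MULT_SHIFT(0..7) below) and the
 * next 3 bits select the bit within that byte. The sample address is the
 * IPv4 all-hosts multicast MAC; the resulting indices depend on ether_crc().
 */
#if 0 /* example only, not built */
static void example_mc_hash(void)
{
	static const u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u8 mcfilter[8] = { 0 };
	u32 crc = ether_crc(ETH_ALEN, addr);
	unsigned int byte_index = crc >> 29;		/* CRC bits 31..29 */
	unsigned int bit_index = (crc >> 26) & 7;	/* CRC bits 28..26 */

	mcfilter[byte_index] |= 1 << bit_index;
}
#endif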
926 | ||
927 | /** | |
928 | * do_set_multicast_list - set/clear multicast filter | |
929 | * @dev: net device for which multicast filter is adjusted | |
930 | * | |
931 | * Set or clear the multicast filter for this adaptor. May be called | |
932 | * from a BH in 2.1.x. Must be called with lock held. |
933 | */ | |
934 | ||
935 | static void do_set_multicast_list(struct net_device *dev) | |
936 | { | |
937 | unsigned long e8390_base = dev->base_addr; | |
938 | int i; | |
939 | struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); | |
940 | ||
941 | if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) | |
942 | { | |
943 | memset(ei_local->mcfilter, 0, 8); | |
944 | if (dev->mc_list) | |
945 | make_mc_bits(ei_local->mcfilter, dev); | |
946 | } | |
947 | else | |
948 | memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */ | |
949 | ||
950 | /* | |
951 | * DP8390 manuals don't specify any magic sequence for altering | |
952 | * the multicast regs on an already running card. To be safe, we | |
953 | * ensure multicast mode is off prior to loading up the new hash | |
954 | * table. If this proves to be not enough, we can always resort | |
955 | * to stopping the NIC, loading the table and then restarting. | |
956 | * | |
957 | * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC | |
958 | * Elite16) appear to be write-only. The NS 8390 data sheet lists | |
959 | * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and | |
960 | * Ultra32 EISA) appears to have this bug fixed. | |
961 | */ | |
962 | ||
963 | if (netif_running(dev)) | |
964 | ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); | |
965 | ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); | |
966 | for(i = 0; i < 8; i++) | |
967 | { | |
968 | ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); | |
969 | #ifndef BUG_83C690 | |
970 | if(ei_inb_p(e8390_base + EN1_MULT_SHIFT(i))!=ei_local->mcfilter[i]) | |
971 | printk(KERN_ERR "Multicast filter read/write mismatch %d\n",i); |
972 | #endif | |
973 | } | |
974 | ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); | |
975 | ||
976 | if(dev->flags&IFF_PROMISC) | |
977 | ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); | |
978 | else if(dev->flags&IFF_ALLMULTI || dev->mc_list) | |
979 | ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); | |
980 | else | |
981 | ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); | |
982 | } | |
983 | ||
984 | /* | |
985 | * Called without lock held. This is invoked from user context and may | |
986 | * be parallel to just about everything else. It's also fairly quick and |
987 | * not called too often. Must protect against both bh and irq users. |
988 | */ | |
989 | ||
990 | static void __ei_set_multicast_list(struct net_device *dev) |
991 | { |
992 | unsigned long flags; | |
993 | struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); | |
994 | ||
995 | spin_lock_irqsave(&ei_local->page_lock, flags); | |
996 | do_set_multicast_list(dev); | |
997 | spin_unlock_irqrestore(&ei_local->page_lock, flags); | |
998 | } | |
999 | ||
1000 | /** | |
1001 | * ethdev_setup - init rest of 8390 device struct | |
1002 | * @dev: network device structure to init | |
1003 | * | |
1004 | * Initialize the rest of the 8390 device structure. Do NOT __init | |
1005 | * this, as it is used by 8390 based modular drivers too. |
1006 | */ | |
1007 | ||
1008 | static void ethdev_setup(struct net_device *dev) | |
1009 | { | |
1010 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
1011 | if (ei_debug > 1) | |
1012 | printk(version); | |
1013 | ||
1014 | ether_setup(dev); |
1015 | ||
1016 | spin_lock_init(&ei_local->page_lock); | |
1017 | } | |
1018 | ||
1019 | /** | |
1020 | * alloc_ei_netdev - alloc_etherdev counterpart for 8390 | |
1021 | * @size: extra bytes to allocate | |
1022 | * | |
1023 | * Allocate 8390-specific net_device. | |
1024 | */ | |
1025 | static struct net_device *____alloc_ei_netdev(int size) | |
1026 | { | |
1027 | return alloc_netdev(sizeof(struct ei_device) + size, "eth%d", | |
1028 | ethdev_setup); | |
1029 | } | |
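/*
 * Illustrative only: roughly how a board driver uses the alloc_ei_netdev()
 * wrapper that 8390.c exports on top of this, filling in its I/O details,
 * ring layout and the four hooks before registering. The mycard_* names,
 * the netdev_ops structure and the NE2000-style resource values are
 * assumptions for the sketch, not taken from a real driver.
 */
#if 0 /* example only, not built */
static int __init mycard_init(void)
{
	struct net_device *dev = alloc_ei_netdev();
	struct ei_device *ei_local;
	int err;

	if (!dev)
		return -ENOMEM;

	ei_local = netdev_priv(dev);
	dev->base_addr = 0x300;			/* assumed I/O base */
	dev->irq = 10;				/* assumed IRQ line */
	dev->netdev_ops = &mycard_netdev_ops;	/* hypothetical ops table */

	ei_local->word16 = 1;			/* 16 bit card */
	ei_local->tx_start_page = 0x40;		/* NE2000-style buffer layout */
	ei_local->rx_start_page = 0x40 + TX_PAGES;
	ei_local->stop_page = 0x80;
	ei_local->reset_8390 = mycard_reset_8390;
	ei_local->block_input = mycard_block_input;
	ei_local->block_output = mycard_block_output;
	ei_local->get_8390_hdr = mycard_get_8390_hdr;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}
#endif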
1030 | ||
1031 | ||
1032 | ||
1033 | ||
1034 | /* This page of functions should be 8390 generic */ | |
1035 | /* Follow National Semi's recommendations for initializing the "NIC". */ | |
1036 | ||
1037 | /** | |
1038 | * NS8390_init - initialize 8390 hardware | |
1039 | * @dev: network device to initialize | |
1040 | * @startp: boolean. non-zero value to initiate chip processing | |
1041 | * | |
1042 | * Must be called with lock held. | |
1043 | */ | |
1044 | ||
1045 | static void __NS8390_init(struct net_device *dev, int startp) | |
1046 | { | |
1047 | unsigned long e8390_base = dev->base_addr; | |
1048 | struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); | |
1049 | int i; | |
1050 | int endcfg = ei_local->word16 | |
1051 | ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) | |
1052 | : 0x48; | |
1053 | ||
1054 | if(sizeof(struct e8390_pkt_hdr)!=4) | |
1055 | panic("8390.c: header struct mispacked\n"); | |
1056 | /* Follow National Semi's recommendations for initing the DP83902. */ | |
1057 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ | |
1058 | ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ | |
1059 | /* Clear the remote byte count registers. */ | |
1060 | ei_outb_p(0x00, e8390_base + EN0_RCNTLO); | |
1061 | ei_outb_p(0x00, e8390_base + EN0_RCNTHI); | |
1062 | /* Set to monitor and loopback mode -- this is vital! */ |
1063 | ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */ | |
1064 | ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ | |
1065 | /* Set the transmit page and receive ring. */ | |
1066 | ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); | |
1067 | ei_local->tx1 = ei_local->tx2 = 0; | |
1068 | ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); | |
1069 | ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ | |
1070 | ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ | |
1071 | ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); | |
1072 | /* Clear the pending interrupts and mask. */ | |
1073 | ei_outb_p(0xFF, e8390_base + EN0_ISR); | |
1074 | ei_outb_p(0x00, e8390_base + EN0_IMR); | |
1075 | ||
1076 | /* Copy the station address into the DS8390 registers. */ | |
1077 | ||
1078 | ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ | |
1079 | for(i = 0; i < 6; i++) | |
1080 | { | |
1081 | ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); | |
1082 | if (ei_debug > 1 && ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i]) | |
1083 | printk(KERN_ERR "Hw. address read/write mismatch %d\n",i); |
1084 | } | |
1085 | ||
1086 | ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); | |
1087 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); | |
1088 | ||
1089 | netif_start_queue(dev); | |
1090 | ei_local->tx1 = ei_local->tx2 = 0; | |
1091 | ei_local->txing = 0; | |
1092 | ||
1093 | if (startp) | |
1094 | { | |
1095 | ei_outb_p(0xff, e8390_base + EN0_ISR); | |
1096 | ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); | |
1097 | ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); | |
1098 | ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */ | |
1099 | /* 3c503 TechMan says rxconfig only after the NIC is started. */ | |
1100 | ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */ | |
1101 | do_set_multicast_list(dev); /* (re)load the mcast table */ | |
1102 | } | |
1103 | } | |
1104 | ||
1105 | /* Trigger a transmit start, assuming the length is valid. | |
1106 | Always called with the page lock held */ | |
1107 | ||
1108 | static void NS8390_trigger_send(struct net_device *dev, unsigned int length, | |
1109 | int start_page) | |
1110 | { | |
1111 | unsigned long e8390_base = dev->base_addr; | |
1112 | struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); | |
1113 | ||
1114 | ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); | |
1115 | ||
1116 | if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) | |
1117 | { | |
1118 | printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n", | |
1119 | dev->name); | |
1120 | return; | |
1121 | } | |
1122 | ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO); | |
1123 | ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI); | |
1124 | ei_outb_p(start_page, e8390_base + EN0_TPSR); | |
1125 | ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); | |
1126 | } |