/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	Based loosely on the original e100 driver, but better described as a
 *	munging of e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *		Open Source Software Developers Manual,
 *		http://sourceforge.net/projects/e1000
 *
 *	                      Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
 *	protects the shared Command Block List (CBL).
 *
 *	The 8255x is highly MII-compliant and all accesses to the PHY go
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL), thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
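 *
 *	For example, with TCBs A->B->C and the suspend-bit set on C, the
 *	CU suspends after completing C; queuing a new command D clears the
 *	suspend-bit on C, sets it on D, and a CU resume picks up at D
 *	without a full CU start.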
 *
 *	Non-Tx commands (config, multicast setup, etc) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB clean-
 *	up happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
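 *
 *	As an illustrative sketch (not literal driver code), the queue and
 *	clean halves of that cycle walk the ring roughly like so:
 *
 *		cb = nic->cb_to_use;		/* queue a command */
 *		nic->cb_to_use = cb->next;
 *		nic->cbs_avail--;
 *		...
 *		cb = nic->cb_to_clean;		/* reap completions */
 *		if (cb->status & cpu_to_le16(cb_complete)) {
 *			nic->cb_to_clean = cb->next;
 *			nic->cbs_avail++;
 *		}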
 *
 *	Hardware padding of short packets to minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
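 *
 *	Concretely (mirroring e100_rx_indicate() below), software peeks at
 *	the completion status only after syncing the RFD for the CPU:
 *
 *		pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
 *			sizeof(struct rfd), PCI_DMA_FROMDEVICE);
 *		rfd_status = le16_to_cpu(rfd->status);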
 *
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is generated from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none re-
 *	placed.
 *
 *	V.   Miscellaneous
 *
 * 	VLAN offloading of tagging, stripping, and filtering is not
 * 	supported, but the driver will accommodate the extra 4-byte VLAN tag
 * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
 * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 * 	not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 * 	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 *	2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <asm/unaligned.h>

#define DRV_NAME		"e100"
#define DRV_EXT		"-NAPI"
#define DRV_VERSION		"3.5.23-k4"DRV_EXT
#define DRV_DESCRIPTION	"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");

#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__FUNCTION__ , ## args))
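
/* Example use (assuming a `nic` in scope whose msg_enable covers the
 * HW message class):
 *
 *	DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n", result);
 *
 * expands to a printk gated on nic->msg_enable and prefixed with the
 * driver name, netdev name, and enclosing function name. */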

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
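
/* The trailing `ich` argument lands in pci_device_id.driver_data; the
 * probe path (not shown in this excerpt) uses a non-zero value to set
 * the `ich` flag and enable the ICHx workarounds below. */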
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

enum stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
} __attribute__ ((packed));

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	u16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	u16 status;
	u16 command;
	u32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		u32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				u32 buf_addr;
				u16 size;
				u16 eol;
			} tbd;
		} tcb;
		u32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	u16 xmt_tco_frames, rcv_tco_frames;
	u32 complete;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	u16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct net_device_stats net_stats;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	u16 eeprom[256];
	spinlock_t mdio_lock;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
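
/* The self-test below is kicked off by writing the DMA address of a
 * result buffer, with the `selftest` port command encoded in its low
 * bits, to the PORT register; the chip then writes back a signature
 * and a result word that software checks after a short wait. */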
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for(j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for(i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}

/* General technique stolen from the eepro100 driver - very clever */
static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for(i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if(!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return le16_to_cpu(data);
}

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if(addr < nic->eeprom_wc - 1)
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000	/* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20	/* delay like the old code */
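
/* e100_exec_cmd() waits for the previous command to be accepted by
 * polling the SCB command byte until it reads back zero, busy-waiting
 * for the first E100_WAIT_SCB_FAST iterations and backing off with a
 * small udelay thereafter; it gives up with -EAGAIN once
 * E100_WAIT_SCB_TIMEOUT iterations pass. */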
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC)
				schedule_work(&nic->tx_timeout_task);
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		printk("e100.mdio_ctrl(%s) won't go Ready\n",
			nic->netdev->name);
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	DPRINTK(HW, DEBUG,
		"%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
	return (u16)data_out;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = cpu_to_le16(cb_el);
	nic->blank_rfd.rbd = 0xFFFFFFFF;
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20;  /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 8                      */
/********************************************************/

/*  Parameter values for the D101M B-step  */
#define D101M_CPUSAVER_TIMER_DWORD		78
#define D101M_CPUSAVER_BUNDLE_DWORD		65
#define D101M_CPUSAVER_MIN_SIZE_DWORD		126

#define D101M_B_RCVBUNDLE_UCODE \
{\
0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
0x00380438, 0x00000000, 0x00140000, 0x00380555, \
0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
0x00380559, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
}

/********************************************************/
/*  Micro code for 8086:1229 Rev 9                      */
/********************************************************/

/*  Parameter values for the D101S  */
#define D101S_CPUSAVER_TIMER_DWORD		78
#define D101S_CPUSAVER_BUNDLE_DWORD		67
#define D101S_CPUSAVER_MIN_SIZE_DWORD		128

#define D101S_RCVBUNDLE_UCODE \
{\
0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
0x00101313, 0x00380700, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00130831, \
0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
0x00041000, 0x00010004, 0x00380700 \
}

/********************************************************/
/*  Micro code for the 8086:1229 Rev F/10               */
/********************************************************/

/*  Parameter values for the D102 E-step  */
#define D102_E_CPUSAVER_TIMER_DWORD		42
#define D102_E_CPUSAVER_BUNDLE_DWORD		54
#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46

#define D102_E_RCVBUNDLE_UCODE \
{\
0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;
		u8 timer_dword;
		u8 bundle_dword;
		u8 min_size_dword;
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}
	}, *opts = ucode_opts;

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w revision */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	cb->command = cpu_to_le16(cb_nop | cb_el);
}

static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE,ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Select the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct dev_mc_list *list = netdev->mc_list;
	u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	for(i = 0; list && i < count; i++, list = list->next)
		memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
			ETH_ALEN);
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
		netdev->mc_count, netdev->flags);

	if(netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if(netdev->flags & IFF_ALLMULTI ||
	   netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device_stats *ns = &nic->net_stats;
	struct stats *s = &nic->mem->stats;
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if(duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if((nic->tx_frames / 32 < nic->tx_collisions) &&
		   (nic->tx_frames > min_frames)) {
			if(nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if(nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if(nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}

static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
	} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if(nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to work around a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}

static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if(nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if(e100_exec_cmd(nic, cuc_nop, 0))
			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch(err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return 1;
	}

	netdev->trans_start = jiffies;
	return 0;
}

static int e100_tx_clean(struct nic *nic)
{
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			cb->status);

		if(likely(cb->skb != NULL)) {
			nic->net_stats.tx_packets++;
			nic->net_stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_alloc_consistent(nic->pdev,
		sizeof(struct cb) * count, &nic->cbs_dma_addr);
	if(!nic->cbs)
		return -ENOMEM;

	for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}

static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if(!nic->rxs) return;
	if(RU_SUSPENDED != nic->ru_running) return;

	/* handle init time starts */
	if(!rx) rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if(rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one, and clearing EL bit of previous. */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}
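
/* e100_rx_indicate() hands one completed RFD to the stack.  It returns
 * -EAGAIN when the NAPI work quota is already consumed and -ENODATA
 * when the next RFD has not completed yet; e100_rx_clean() below keys
 * its restart/refill logic off both return values. */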
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* this allows for a fast restart without re-enabling interrupts */
	if(le16_to_cpu(rfd->command) & cb_el)
		nic->ru_running = RU_SUSPENDED;

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		nic->net_stats.rx_packets++;
		nic->net_stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}

static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0;
	struct rx *rx_to_start = NULL;

	/* are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if(RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		if(-EAGAIN == err) {
			/* hit quota so have more work to do, restart once
			 * cleanup is complete */
			restart_required = 0;
			break;
		} else if(-ENODATA == err)
			break; /* No more to clean */
	}

	/* save our starting point as the place we'll restart the receiver */
	if(restart_required)
		rx_to_start = nic->rx_to_clean;

	/* Alloc new skbs to refill list */
	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if(unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	if(restart_required) {
		/* ack the rnr */
		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, rx_to_start);
	}
}

static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if(nic->rxs) {
		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if(rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if(e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}

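/* The rx entries form a circular doubly-linked ring over the RFD list.
 * The list deliberately ends life marked RU_SUSPENDED: the receiver-start
 * path only issues a RU start from the suspended state, so this lets the
 * initial e100_start_receiver() call in e100_up() actually proceed. */
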
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev, &nic->napi);
	}

	return IRQ_HANDLED;
}

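/* Standard NAPI hand-off: claim the poll (netif_rx_schedule_prep), mask
 * device interrupts, then schedule e100_poll(). Interrupts stay masked
 * until e100_poll() runs out of work and calls e100_enable_irq(). */
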
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	struct net_device *netdev = nic->netdev;
	unsigned int work_done = 0;
	int tx_cleaned;

	e100_rx_clean(nic, &work_done, budget);
	tx_cleaned = e100_tx_clean(nic);

	/* If no Rx and Tx cleanup work was done, exit polling mode. */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev, napi);
		e100_enable_irq(nic);
	}

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static struct net_device_stats *e100_get_stats(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return &nic->net_stats;
}

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}

static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}

static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

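/* Bring-up order matters here: resources first (RFD ring, CBs), then
 * hardware init and receiver start, and only then is the IRQ handler
 * registered, once everything it may touch exists. The error labels
 * unwind in reverse order, the usual kernel goto-cleanup idiom. */
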
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
		ioread8(&nic->csr->scb.status));
	e100_down(netdev_priv(netdev));
	e100_up(netdev_priv(netdev));
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test. A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

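/* The looped-back frame lands in the first RFD, so the compare skips the
 * sizeof(struct rfd) header that precedes the payload in the receive
 * buffer. The brief sleep gives the hardware time to complete the round
 * trip before the receive buffer is synced and inspected. */
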
#define MII_LED_CONTROL	0x1B
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};

	nic->leds = (nic->leds & led_on) ? led_off :
		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(nic->pdev));
}

static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
#define E100_PHY_REGS	0x1C
/* One word for the SCB snapshot, E100_PHY_REGS + 1 PHY registers, then the
 * dump buffer; parenthesized so the whole count is scaled by sizeof(u32),
 * matching the layout written by e100_get_regs() below. */
#define E100_REGS_LEN	(2 + E100_PHY_REGS + \
	sizeof(nic->mem->dump_buf) / sizeof(u32))
	return E100_REGS_LEN * sizeof(u32);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}

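/* Dump layout as exposed to ethtool: buff[0] holds an SCB command/status
 * snapshot, buff[1..E100_PHY_REGS + 1] holds the MDIO registers read in
 * descending order, and the remainder is the controller's own dump-command
 * output, fetched after a short wait for the e100_dump CB to complete. */
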
static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
		return -EOPNOTSUPP;

	if(wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;	/* EEPROM word count to bytes */
}

#define E100_EEPROM_MAGIC	0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if(eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}

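/* The EEPROM is word (16-bit) addressed while the ethtool API is byte
 * addressed, hence the >> 1 conversions; the + 1 saves one extra word so
 * that a byte range starting or ending mid-word is still fully covered. */
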
static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if(netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
		rfds->count, cbs->count);
	if(netif_running(netdev))
		e100_up(nic);

	return 0;
}

static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN	sizeof(e100_gstrings_test) / ETH_GSTRING_LEN

static int e100_diag_test_count(struct net_device *netdev)
{
	return E100_TEST_LEN;
}

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}

static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);

	if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);

	return 0;
}

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN

static int e100_get_stats_count(struct net_device *netdev)
{
	return E100_STATS_LEN;
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for(i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&nic->net_stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}

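/* The first E100_NET_STATS_LEN entries are lifted straight out of struct
 * net_device_stats by treating it as an array of unsigned long, so their
 * order must match both that structure's layout and the first 21 strings
 * in e100_gstrings_stats above; the driver-specific counters follow. */
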
static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch(stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings = e100_get_settings,
	.set_settings = e100_set_settings,
	.get_drvinfo = e100_get_drvinfo,
	.get_regs_len = e100_get_regs_len,
	.get_regs = e100_get_regs,
	.get_wol = e100_get_wol,
	.set_wol = e100_set_wol,
	.get_msglevel = e100_get_msglevel,
	.set_msglevel = e100_set_msglevel,
	.nway_reset = e100_nway_reset,
	.get_link = e100_get_link,
	.get_eeprom_len = e100_get_eeprom_len,
	.get_eeprom = e100_get_eeprom,
	.set_eeprom = e100_set_eeprom,
	.get_ringparam = e100_get_ringparam,
	.set_ringparam = e100_set_ringparam,
	.self_test_count = e100_diag_test_count,
	.self_test = e100_diag_test,
	.get_strings = e100_get_strings,
	.phys_id = e100_phys_id,
	.get_stats_count = e100_get_stats_count,
	.get_ethtool_stats = e100_get_ethtool_stats,
};

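/* These ops are attached in e100_probe() via SET_ETHTOOL_OPS(), so the
 * usual userspace entry points (e.g. "ethtool -i", "ethtool -t ... offline",
 * "ethtool -p") land in the handlers above. */
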
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if(nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if((err = e100_up(nic)))
		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
		return -ENOMEM;
	}

	netdev->open = e100_open;
	netdev->stop = e100_close;
	netdev->hard_start_xmit = e100_xmit_frame;
	netdev->get_stats = e100_get_stats;
	netdev->set_multicast_list = e100_set_multicast_list;
	netdev->set_mac_address = e100_set_mac_address;
	netdev->change_mtu = e100_change_mtu;
	netdev->do_ioctl = e100_do_ioctl;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->tx_timeout = e100_tx_timeout;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e100_netpoll;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	pci_set_drvdata(pdev, netdev);

	if((err = pci_enable_device(pdev))) {
		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
		goto err_out_free_dev;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if((err = pci_request_regions(pdev, DRV_NAME))) {
		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		DPRINTK(PROBE, INFO, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if(!nic->csr) {
		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if(ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if((err = e100_alloc(nic))) {
		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
		goto err_out_iounmap;
	}

	if((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			DPRINTK(PROBE, ERR, "Invalid MAC address from "
				"EEPROM, aborting.\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
				"you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
		nic->flags |= wol_magic;

	/* ack any pending wake events, disable PME */
	err = pci_enable_wake(pdev, 0, 0);
	if (err)
		DPRINTK(PROBE, ERR, "Error clearing wake event\n");

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev))) {
		DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
		goto err_out_free;
	}

	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
		"MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
		(unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), pdev->irq,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if(netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		napi_disable(&nic->napi);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_disable_device(pdev);
	free_irq(pdev->irq, netdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

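/* Either Wake-on-LAN magic-packet support or firmware ASF keeps PME armed
 * for D3hot/D3cold here; otherwise wake events are explicitly disabled
 * before the device is powered down. The bitwise | (rather than ||) also
 * means e100_asf() is evaluated unconditionally. */
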
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		napi_disable(&nic->napi);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		pci_enable_wake(pdev, PCI_D3hot, 1);
		pci_enable_wake(pdev, PCI_D3cold, 1);
	} else {
		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	netdev->stop(netdev);

	/* Detach; put netif into a state similar to hotplug unplug. */
	napi_enable(&nic->napi);
	netif_device_detach(netdev);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};

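/* On an uncorrectable PCI error the PCI error-recovery core walks these
 * hooks in order: error_detected() quiesces the driver, slot_reset()
 * re-enables and re-initializes the device after the bus reset, and
 * resume() reattaches the interface and restarts the watchdog. */
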
static int __init e100_init_module(void)
{
	if(((1 << debug) - 1) & NETIF_MSG_DRV) {
		printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);