/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on the
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *	Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *	Open Source Software Developers Manual,
 *	http://sourceforge.net/projects/e1000
 *
 *
 *	                     Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  The 8255x supports a 32-bit linear addressing
 *	mode and operates at a 33 MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is done through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
 *	protects the shared Command Block List (CBL).
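 *
 *	As a rough sketch (not a verbatim quote of the code below), a CSR
 *	command issue under cmd_lock therefore looks like:
 *
 *		spin_lock_irqsave(&nic->cmd_lock, flags);
 *		iowrite8(cmd, &nic->csr->scb.cmd_lo);
 *		e100_write_flush(nic);
 *		spin_unlock_irqrestore(&nic->cmd_lock, flags);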
 *
 *	The 8255x is highly MII-compliant and all accesses to the PHY go
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	PHYs.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
 *
 *	Non-Tx commands (config, multicast setup, etc.) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB clean
 *	up happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
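 *
 *	Roughly, queuing a command claims the next free CB:
 *
 *		cb = nic->cb_to_use;
 *		nic->cb_to_use = cb->next;
 *		nic->cbs_avail--;
 *
 *	while clean up walks cb_to_clean forward, incrementing cbs_avail
 *	for every CB found with its complete bit set.
 *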
 *	Hardware padding of short packets to the minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we exploit the fact that
 *	hardware will not write to a size 0 descriptor, and we mark the
 *	previous packet as end-of-list (EL).  After updating the link, we
 *	remove EL and only then restore the size such that hardware may use
 *	the previous-to-end RFD.
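 *
 *	In code terms the ordering is roughly (a sketch of the refill path;
 *	variable names here are illustrative only):
 *
 *		put_unaligned_le32(new_rfd_dma, &prev_rfd->link);
 *		before_last_rfd->command &= ~cpu_to_le16(cb_el);
 *		before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
 *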
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is issued from the watchdog to recover from a failed allocation
 *	scenario where all Rx resources have been indicated and none re-
 *	allocated.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN tag
 *	for processing by upper layers.  Tx/Rx Checksum offloading is not
 *	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 *	not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	VI.  Known Issues/Errata
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
 *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <asm/unaligned.h>
#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);
static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
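/* Example (hypothetical values): load with verbose logging and forced
 * I/O-port access:
 *	modprobe e100 debug=16 use_io=1
 */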
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
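/* For instance, INTEL_8255X_ETHERNET_DEVICE(0x1229, 0) matches vendor
 * 0x8086 (Intel), device 0x1229, any subsystem, PCI class Ethernet,
 * and stores 0 (non-ICH) in driver_data. */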
static const struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 9,
	mac_82559_D101S   = 10,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};
enum scb_cmd_hi {
	irq_sw_gen    = 0x02,
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
};
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};
enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/**
 * cb_command - Command Block flags
 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};
/* Important: keep total struct u32-aligned */
#define UCODE_SIZE	134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};
struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;

	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable ____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs ____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock ____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags ____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}
/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
}
/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
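	/* (e.g. if the first wc-1 words sum to 0xB000, the last word must
	 * hold 0xBABA - 0xB000 = 0x0ABA for the image to be valid) */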
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
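/* Rough arithmetic behind the bound above: past the first
 * E100_WAIT_SCB_FAST spins each iteration delays ~5 us, so 20000
 * iterations is on the order of 100 ms. */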
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	err = cb_prepare(nic, cb, skb);
	if (err)
		goto err_unlock;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}
/* the standard mdio_ctrl() function for usual MII-compliant hardware */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0; /* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/*
			 * Workaround Si issue where sometimes the part will not
			 * autoneg to 100Mbps even when advertised.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}
/* Fully software-emulated mdio_ctrl() function for cards without
 * MII-compliant PHYs.
 * For now, this is mainly geared towards 80c24 support; in case of further
 * requirements for other types (i82503, ...?) either extend this mechanism
 * or split it, whichever is cleaner.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return  BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			return	BMSR_LSTATUS /* for mii_link_ok() */ |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return	ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}
static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	 * are using MII software emulation.
	 */
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;		/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
		     c + 0);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
		     c + 8);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
		     c + 16);

	return 0;
}
/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/
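/* Worked example with the defaults below: INTDELAY arms the dead-man timer
 * with 0x600 (1536), BUNDLEMAX caps a bundle at 6 frames, and BUNDLESMALL's
 * 0xFF80 mask means a 64-byte ACK (< 128 bytes) interrupts immediately
 * while a 1514-byte data frame is bundled. */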
/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision
	 *
	 * Based on comments in the source code for the FreeBSD fxp
	 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
	 * "fixes for bugs in the B-step hardware (specifically, bugs
	 * with Inline Receive)."
	 * So we must fail if it cannot be loaded.
	 *
	 * The other microcode files are only required for the optional
	 * CPUSaver feature.  Nice to have, but no reason to fail.
	 */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else { /* No ucode on other devices */
		return NULL;
	}

	/* If the firmware has not previously been loaded, request a pointer
	 * to it. If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	 * indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use. Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}
static int e100_setup_ucode(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	 * will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
	return 0;
}
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
	return 0;
}

static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
	return 0;
}
static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
		 * doesn't have a programming interface of any sort.  The
		 * media is sensed automatically based on how the link partner
		 * is configured.  This is, in essence, manual configuration.
		 */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				   bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			   bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
	return 0;
}
static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "mc_count=%d, flags=0x%04X\n",
		     netdev_mc_count(netdev), netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if (duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
		    (nic->tx_frames > min_frames)) {
			if (nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if (nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if (nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
		 &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	dma_addr_t dma_addr;
	cb->command = nic->tx_command;

	dma_addr = pci_map_single(nic->pdev,
				  skb->data, skb->len, PCI_DMA_TODEVICE);
	/* If we can't map the skb, have the upper layer try later */
	if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
		return -ENOMEM;
	}

	/*
	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
	 * testing, ie sending frames with bad CRC.
	 */
	if (unlikely(skb->no_fcs))
		cb->command |= cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	skb_tx_timestamp(skb);
	return 0;
}
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		 * Issue a NOP command followed by a 1us delay before
		 * issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	     cb->status & cpu_to_le16(cb_complete);
	     cb = nic->cb_to_clean = cb->next) {
		dma_rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs) return;
	if (RU_SUSPENDED != nic->ru_running) return;

	/* handle init time starts */
	if (!rx) rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);

	dma_rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts. */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))
			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {
		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* If we are receiving all frames, then don't bother
	 * checking for errors. */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}
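
/* RX ring maintenance.  The hardware is stopped not by a NULL link but by
 * an RFD with the el (end-of-list) bit set and size 0, placed one entry
 * before the last allocated buffer.  Refilling moves that stopper
 * forward, roughly:
 *
 *   clean -> [RFD][RFD][RFD][el,size 0][RFD]   (ring, wraps)
 *                           ^ RU suspends here (RNR), never writes it
 *
 * Clearing the old stopper takes two device syncs so the el bit is
 * cleared before the non-zero size becomes visible to the hardware. */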
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Otherwise, if we are already in RNR state, pay attention: this
	 * ensures that the state machine progression never allows a start
	 * with a partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode. */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before-last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the RNR before restarting the receiver */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before-last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
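
/* Interrupt entry point.  The SCB stat/ack byte is read once and written
 * back verbatim to acknowledge everything that was seen.  Real work is
 * deferred to NAPI: interrupts stay disabled from here until e100_poll()
 * finishes under budget and re-enables them. */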
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resources (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
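
/* NAPI poll: clean RX within the given budget and reap TX completions.
 * Per the NAPI contract, the poll routine only completes and re-enables
 * interrupts when it consumed less than its budget. */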
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		e100_enable_irq(nic);
	}

	return work_done;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}
static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
		(nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
		!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
		((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
}
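
/* Bring-up order matters: RX list and CBL first, then hardware init and
 * configuration, then the receiver and watchdog, and only after the IRQ
 * handler is registered are the queue, NAPI, and interrupts enabled.
 * The error ladder below unwinds in exactly the reverse order. */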
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	    skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
#define MII_LED_CONTROL		0x1B
#define E100_82552_LED_OVERRIDE	0x19
#define E100_82552_LED_ON	0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF	0x000A /* LEDTX and LED_RX both off */
static int e100_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct nic *nic = netdev_priv(netdev);

	mii_ethtool_get_link_ksettings(&nic->mii, cmd);

	return 0;
}

static int e100_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}
#define E100_PHY_REGS		0x1C
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* e100_get_regs() below writes one SCB word plus E100_PHY_REGS + 1
	 * PHY registers, all as u32s, followed by the dump buffer; report
	 * the matching byte count so ethtool sizes its buffer correctly. */
	return (2 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}
static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}
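
/* The EEPROM is cached in nic->eeprom as an array of 16-bit words
 * (eeprom_wc is the word count, hence the << 1 above to report bytes).
 * ethtool hands us byte offsets and lengths, so the save path below
 * converts back to word units with >> 1.  Illustrative example: a 4-byte
 * write at byte offset 6 touches words 3 and 4; the (len >> 1) + 1
 * rounding saves words 3..5 to cover unaligned spans. */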
#define E100_EEPROM_MAGIC	0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
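
/* ethtool self-test: data[] entries correspond one-to-one with the
 * e100_gstrings_test names above (0 = pass).  The link and EEPROM checks
 * always run; the self-test and both loopback tests require taking the
 * interface offline, and link settings are restored afterwards. */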
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}
static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		      MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}
static const struct ethtool_ops e100_ethtool_ops = {
	.get_drvinfo = e100_get_drvinfo,
	.get_regs_len = e100_get_regs_len,
	.get_regs = e100_get_regs,
	.get_wol = e100_get_wol,
	.set_wol = e100_set_wol,
	.get_msglevel = e100_get_msglevel,
	.set_msglevel = e100_set_msglevel,
	.nway_reset = e100_nway_reset,
	.get_link = e100_get_link,
	.get_eeprom_len = e100_get_eeprom_len,
	.get_eeprom = e100_get_eeprom,
	.set_eeprom = e100_set_eeprom,
	.get_ringparam = e100_get_ringparam,
	.set_ringparam = e100_set_ringparam,
	.self_test = e100_diag_test,
	.get_strings = e100_get_strings,
	.set_phys_id = e100_set_phys_id,
	.get_ethtool_stats = e100_get_ethtool_stats,
	.get_sset_count = e100_get_sset_count,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = e100_get_link_ksettings,
	.set_link_ksettings = e100_set_link_ksettings,
};
static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}
static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}
static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}
static int e100_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct nic *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	e100_exec_cb(nic, NULL, e100_configure);
	return 0;
}
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open = e100_open,
	.ndo_stop = e100_close,
	.ndo_start_xmit = e100_xmit_frame,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = e100_set_multicast_list,
	.ndo_set_mac_address = e100_set_mac_address,
	.ndo_do_ioctl = e100_do_ioctl,
	.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e100_netpoll,
#endif
	.ndo_set_features = e100_set_features,
};
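
/* Probe brings the device up just far enough to identify it and register
 * a netdev: enable and map PCI resources, reset the chip before enabling
 * bus mastering (an interrupt may already be pending and no handler is
 * installed yet), load the EEPROM for the MAC address and WoL bits, and
 * finally register the netdev and carve a pci_pool for the CBL.  Each
 * failure exits through a goto ladder that undoes the prior steps. */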
static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	netdev->ethtool_ops = &e100_ethtool_ops;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	    (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	if (!nic->cbs_pool) {
		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
		err = -ENOMEM;
		goto err_out_pool;
	}
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_pool:
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	free_netdev(netdev);
	return err;
}
static void e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
#define E100_82552_SMARTSPEED	0x14	/* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG	0x0200	/* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW	0x0400	/* Auto-negotiate now */
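
/* Common shutdown path for suspend and poweroff.  If wake-on-LAN or ASF
 * management is enabled, the caller is told to keep wake events armed;
 * on the 82552 this also turns on reverse auto-negotiation so the PHY
 * can renegotiate a slower link speed while the host sleeps. */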
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
						   E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
				   E100_82552_SMARTSPEED, smartspeed |
				   E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_clear_master(pdev);
}
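
/* If wake is requested, pci_prepare_to_sleep() arms PME and chooses an
 * appropriate wake-capable power state; otherwise wake is explicitly
 * disabled and the device is put straight into D3hot. */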
static int __e100_power_off(struct pci_dev *pdev, bool wake)
{
	if (wake)
		return pci_prepare_to_sleep(pdev);

	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;

	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
					   E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
			   E100_82552_SMARTSPEED,
			   smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */
static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}
/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}
static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = e100_remove,
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};
static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);