1 /*******************************************************************************
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
30 * e100.c: Intel(R) PRO/100 ethernet driver
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33MHz PCI clock rate.
55 * II. Driver Operation
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issuing a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
109 * In order to keep updates to the RFD link field from colliding with
110 * hardware writes to mark packets complete, we use the feature that
111 * hardware will not write to a size 0 descriptor and mark the previous
112 * packet as end-of-list (EL). After updating the link, we remove EL
113 * and only then restore the size such that hardware may use the
114 * previous-to-end RFD.
116 * Under typical operation, the receive unit (RU) is started once,
117 * and the controller happily fills RFDs as frames arrive. If
118 * replacement RFDs cannot be allocated, or the RU goes non-active,
119 * the RU must be restarted. Frame arrival generates an interrupt,
120 * and Rx indication and re-allocation happen in the same context,
121 * therefore no locking is required. A software-generated interrupt
122 * is generated from the watchdog to recover from a failed allocation
123 * scenario where all Rx resources have been indicated and none re-
128 * VLAN offloading of tagging, stripping and filtering is not
129 * supported, but driver will accommodate the extra 4-byte VLAN tag
130 * for processing by upper layers. Tx/Rx Checksum offloading is not
131 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
132 * not supported (hardware limitation).
134 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
137 * testing/troubleshooting the development driver.
140 * o several entry points race with dev->close
141 * o check for tx-no-resources/stop Q races with tx clean/wake Q
144 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
145 * - Stratus87247: protect MDI control register manipulations
146 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
150 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
152 #include <linux/hardirq.h>
153 #include <linux/interrupt.h>
154 #include <linux/module.h>
155 #include <linux/moduleparam.h>
156 #include <linux/kernel.h>
157 #include <linux/types.h>
158 #include <linux/sched.h>
159 #include <linux/slab.h>
160 #include <linux/delay.h>
161 #include <linux/init.h>
162 #include <linux/pci.h>
163 #include <linux/dma-mapping.h>
164 #include <linux/dmapool.h>
165 #include <linux/netdevice.h>
166 #include <linux/etherdevice.h>
167 #include <linux/mii.h>
168 #include <linux/if_vlan.h>
169 #include <linux/skbuff.h>
170 #include <linux/ethtool.h>
171 #include <linux/string.h>
172 #include <linux/firmware.h>
173 #include <linux/rtnetlink.h>
174 #include <asm/unaligned.h>
177 #define DRV_NAME "e100"
178 #define DRV_EXT "-NAPI"
179 #define DRV_VERSION "3.5.24-k2"DRV_EXT
180 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
181 #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
183 #define E100_WATCHDOG_PERIOD (2 * HZ)
184 #define E100_NAPI_WEIGHT 16
186 #define FIRMWARE_D101M "e100/d101m_ucode.bin"
187 #define FIRMWARE_D101S "e100/d101s_ucode.bin"
188 #define FIRMWARE_D102E "e100/d102e_ucode.bin"
190 MODULE_DESCRIPTION(DRV_DESCRIPTION);
191 MODULE_AUTHOR(DRV_COPYRIGHT);
192 MODULE_LICENSE("GPL");
193 MODULE_VERSION(DRV_VERSION);
194 MODULE_FIRMWARE(FIRMWARE_D101M);
195 MODULE_FIRMWARE(FIRMWARE_D101S);
196 MODULE_FIRMWARE(FIRMWARE_D102E);
198 static int debug = 3;
199 static int eeprom_bad_csum_allow = 0;
200 static int use_io = 0;
201 module_param(debug, int, 0);
202 module_param(eeprom_bad_csum_allow, int, 0);
203 module_param(use_io, int, 0);
204 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
205 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
206 MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
208 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
210 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
211 static const struct pci_device_id e100_id_table[] = {
212 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
213 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
214 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
215 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
216 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
217 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
218 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
219 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
220 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
221 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
222 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
223 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
224 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
225 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
226 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
227 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
228 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
229 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
230 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
231 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
232 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
233 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
234 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
235 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
236 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
237 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
238 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
239 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
240 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
241 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
242 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
243 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
244 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
245 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
246 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
247 INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
248 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
249 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
250 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
251 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
252 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
253 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
256 MODULE_DEVICE_TABLE(pci, e100_id_table);
259 mac_82557_D100_A = 0,
260 mac_82557_D100_B = 1,
261 mac_82557_D100_C = 2,
262 mac_82558_D101_A4 = 4,
263 mac_82558_D101_B0 = 5,
267 mac_82550_D102_C = 13,
275 phy_100a = 0x000003E0,
276 phy_100c = 0x035002A8,
277 phy_82555_tx = 0x015002A8,
278 phy_nsc_tx = 0x5C002000,
279 phy_82562_et = 0x033002A8,
280 phy_82562_em = 0x032002A8,
281 phy_82562_ek = 0x031002A8,
282 phy_82562_eh = 0x017002A8,
283 phy_82552_v = 0xd061004d,
284 phy_unknown = 0xFFFFFFFF,
287 /* CSR (Control/Status Registers) */
313 RU_UNINITIALIZED = -1,
317 stat_ack_not_ours = 0x00,
318 stat_ack_sw_gen = 0x04,
320 stat_ack_cu_idle = 0x20,
321 stat_ack_frame_rx = 0x40,
322 stat_ack_cu_cmd_done = 0x80,
323 stat_ack_not_present = 0xFF,
324 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
325 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
329 irq_mask_none = 0x00,
337 ruc_load_base = 0x06,
340 cuc_dump_addr = 0x40,
341 cuc_dump_stats = 0x50,
342 cuc_load_base = 0x60,
343 cuc_dump_reset = 0x70,
347 cuc_dump_complete = 0x0000A005,
348 cuc_dump_reset_complete = 0x0000A007,
352 software_reset = 0x0000,
354 selective_reset = 0x0002,
357 enum eeprom_ctrl_lo {
365 mdi_write = 0x04000000,
366 mdi_read = 0x08000000,
367 mdi_ready = 0x10000000,
377 enum eeprom_offsets {
378 eeprom_cnfg_mdix = 0x03,
379 eeprom_phy_iface = 0x06,
381 eeprom_config_asf = 0x0D,
382 eeprom_smbus_addr = 0x90,
385 enum eeprom_cnfg_mdix {
386 eeprom_mdix_enabled = 0x0080,
389 enum eeprom_phy_iface {
402 eeprom_id_wol = 0x0020,
405 enum eeprom_config_asf {
411 cb_complete = 0x8000,
416 * cb_command - Command Block flags
417 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
445 struct rx *next, *prev;
450 #if defined(__BIG_ENDIAN_BITFIELD)
456 /*0*/ u8 X(byte_count:6, pad0:2);
457 /*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
458 /*2*/ u8 adaptive_ifs;
459 /*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
460 term_write_cache_line:1), pad3:4);
461 /*4*/ u8 X(rx_dma_max_count:7, pad4:1);
462 /*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
463 /*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
464 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
465 rx_save_overruns : 1), rx_save_bad_frames : 1);
466 /*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
467 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
469 /*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
470 /*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
471 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
472 /*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
474 /*11*/ u8 X(linear_priority:3, pad11:5);
475 /*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
476 /*13*/ u8 ip_addr_lo;
477 /*14*/ u8 ip_addr_hi;
478 /*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
479 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
480 pad15_2:1), crs_or_cdt:1);
481 /*16*/ u8 fc_delay_lo;
482 /*17*/ u8 fc_delay_hi;
483 /*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
484 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
485 /*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
486 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
487 full_duplex_force:1), full_duplex_pin:1);
488 /*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
489 /*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
490 /*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
494 #define E100_MAX_MULTICAST_ADDRS 64
497 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
500 /* Important: keep total struct u32-aligned */
501 #define UCODE_SIZE 134
508 __le32 ucode[UCODE_SIZE];
509 struct config config;
522 __le32 dump_buffer_addr;
524 struct cb *next, *prev;
530 lb_none = 0, lb_mac = 1, lb_phy = 3,
534 __le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
535 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
536 tx_multiple_collisions, tx_total_collisions;
537 __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
538 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
539 rx_short_frame_errors;
540 __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
541 __le16 xmt_tco_frames, rcv_tco_frames;
561 struct param_range rfds;
562 struct param_range cbs;
566 /* Begin: frequently used values: keep adjacent for cache effect */
567 u32 msg_enable ____cacheline_aligned;
568 struct net_device *netdev;
569 struct pci_dev *pdev;
570 u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
572 struct rx *rxs ____cacheline_aligned;
573 struct rx *rx_to_use;
574 struct rx *rx_to_clean;
575 struct rfd blank_rfd;
576 enum ru_state ru_running;
578 spinlock_t cb_lock ____cacheline_aligned;
580 struct csr __iomem *csr;
581 enum scb_cmd_lo cuc_cmd;
582 unsigned int cbs_avail;
583 struct napi_struct napi;
585 struct cb *cb_to_use;
586 struct cb *cb_to_send;
587 struct cb *cb_to_clean;
589 /* End: frequently used values: keep adjacent for cache effect */
593 promiscuous = (1 << 1),
594 multicast_all = (1 << 2),
595 wol_magic = (1 << 3),
596 ich_10h_workaround = (1 << 4),
597 } flags ____cacheline_aligned;
601 struct params params;
602 struct timer_list watchdog;
603 struct mii_if_info mii;
604 struct work_struct tx_timeout_task;
605 enum loopback loopback;
610 struct pci_pool *cbs_pool;
611 dma_addr_t cbs_dma_addr;
617 u32 tx_single_collisions;
618 u32 tx_multiple_collisions;
623 u32 rx_fc_unsupported;
625 u32 rx_short_frame_errors;
626 u32 rx_over_length_errors;
630 spinlock_t mdio_lock;
631 const struct firmware *fw;
/* Force any posted MMIO writes out to the device by performing a
 * harmless read of the SCB status register. Used after writes whose
 * timing matters (resets, EEPROM bit-banging). */
634 static inline void e100_write_flush(struct nic *nic)
636 /* Flush previous PCI writes through intermediate bridges
637 * by doing a benign read */
638 (void)ioread8(&nic->csr->scb.status);
/* Unmask device interrupts by writing irq_mask_none to the SCB command
 * high byte. cmd_lock serializes all accesses to the SCB command
 * register (see the "Driver Operation" header comment). */
641 static void e100_enable_irq(struct nic *nic)
645 spin_lock_irqsave(&nic->cmd_lock, flags);
646 iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
/* flush so the unmask takes effect before the lock is dropped */
647 e100_write_flush(nic);
648 spin_unlock_irqrestore(&nic->cmd_lock, flags);
/* Mask all device interrupts by writing irq_mask_all to the SCB command
 * high byte, under cmd_lock; mirror of e100_enable_irq(). */
651 static void e100_disable_irq(struct nic *nic)
655 spin_lock_irqsave(&nic->cmd_lock, flags);
656 iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
/* flush so the mask is in effect before returning */
657 e100_write_flush(nic);
658 spin_unlock_irqrestore(&nic->cmd_lock, flags);
/* Hard-reset the controller: selective reset first to quiesce the CU/RU
 * and get the device off the PCI bus, then a full software reset, then
 * re-mask the interrupt line (reset leaves it unmasked). */
661 static void e100_hw_reset(struct nic *nic)
663 /* Put CU and RU into idle with a selective reset to get
664 * device off of PCI bus */
665 iowrite32(selective_reset, &nic->csr->port);
/* 20us settle time after each PORT write */
666 e100_write_flush(nic); udelay(20);
668 /* Now fully reset device */
669 iowrite32(software_reset, &nic->csr->port);
670 e100_write_flush(nic); udelay(20);
672 /* Mask off our interrupt line - it's unmasked after reset */
673 e100_disable_irq(nic);
/* Run the controller's built-in self-test via the PORT register,
 * pointing it at the selftest area of the DMA-able 'mem' block.
 * Result semantics: result must end up 0 and signature non-zero,
 * otherwise the test failed (or timed out). */
676 static int e100_self_test(struct nic *nic)
678 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
680 /* Passing the self-test is a pretty good indication
681 * that the device can DMA to/from host memory */
/* Pre-set sentinel values the hardware is expected to overwrite */
683 nic->mem->selftest.signature = 0;
684 nic->mem->selftest.result = 0xFFFFFFFF;
686 iowrite32(selftest | dma_addr, &nic->csr->port);
687 e100_write_flush(nic);
688 /* Wait 10 msec for self-test to complete */
691 /* Interrupts are enabled after self-test */
692 e100_disable_irq(nic);
694 /* Check results of self-test */
695 if (nic->mem->selftest.result != 0) {
696 netif_err(nic, hw, nic->netdev,
697 "Self-test failed: result=0x%08X\n",
698 nic->mem->selftest.result);
/* signature still 0 means the device never wrote back: timeout */
701 if (nic->mem->selftest.signature == 0) {
702 netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
/* Write one 16-bit word to the serial EEPROM by bit-banging the
 * eeprom_ctrl_lo register: an erase/write-enable command, the data
 * write itself, then erase/write-disable. addr_len is the device's
 * discovered address width (see e100_eeprom_read()). */
709 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
711 u32 cmd_addr_data[3];
715 /* Three cmds: write/erase enable, write data, write/erase disable */
716 cmd_addr_data[0] = op_ewen << (addr_len - 2);
717 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
719 cmd_addr_data[2] = op_ewds << (addr_len - 2);
721 /* Bit-bang cmds to write word to eeprom */
722 for (j = 0; j < 3; j++) {
/* assert chip-select with clock high to start the command */
725 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
726 e100_write_flush(nic); udelay(4);
/* shift the 32-bit command/address/data word out MSB first */
728 for (i = 31; i >= 0; i--) {
729 ctrl = (cmd_addr_data[j] & (1 << i)) ?
731 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
732 e100_write_flush(nic); udelay(4);
/* raise the clock to latch the data bit */
734 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
735 e100_write_flush(nic); udelay(4);
737 /* Wait 10 msec for cmd to complete */
/* deselect the EEPROM between commands */
741 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
742 e100_write_flush(nic); udelay(4);
746 /* General technique stolen from the eepro100 driver - very clever */
/* Read one 16-bit word from the serial EEPROM by bit-banging
 * eeprom_ctrl_lo. Side effect: *addr_len is corrected in place once
 * the device's dummy-zero on EEDO reveals the true address width. */
747 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
754 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
/* chip-select with clock high starts the transaction */
757 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
758 e100_write_flush(nic); udelay(4);
760 /* Bit-bang to read word from eeprom */
761 for (i = 31; i >= 0; i--) {
762 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
763 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
764 e100_write_flush(nic); udelay(4);
/* clock the bit out, then sample EEDO on the same edge */
766 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
767 e100_write_flush(nic); udelay(4);
769 /* Eeprom drives a dummy zero to EEDO after receiving
770 * complete address. Use this to adjust addr_len. */
771 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
772 if (!(ctrl & eedo) && i > 16) {
773 *addr_len -= (i - 16);
/* accumulate data bits MSB first */
777 data = (data << 1) | (ctrl & eedo ? 1 : 0);
/* deselect the EEPROM */
781 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
782 e100_write_flush(nic); udelay(4);
784 return cpu_to_le16(data);
787 /* Load entire EEPROM image into driver cache and validate checksum */
/* Reads every word into nic->eeprom[] and verifies that the stored
 * checksum word (the last word) makes the total sum equal 0xBABA.
 * Returns an error on a bad checksum unless the eeprom_bad_csum_allow
 * module parameter overrides it. */
788 static int e100_eeprom_load(struct nic *nic)
790 u16 addr, addr_len = 8, checksum = 0;
792 /* Try reading with an 8-bit addr len to discover actual addr len */
793 e100_eeprom_read(nic, &addr_len, 0);
/* word count is 2^addr_len once the real address width is known */
794 nic->eeprom_wc = 1 << addr_len;
796 for (addr = 0; addr < nic->eeprom_wc; addr++) {
797 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
/* sum all words except the checksum word itself */
798 if (addr < nic->eeprom_wc - 1)
799 checksum += le16_to_cpu(nic->eeprom[addr]);
802 /* The checksum, stored in the last word, is calculated such that
803 * the sum of words should be 0xBABA */
804 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
805 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
806 if (!eeprom_bad_csum_allow)
813 /* Save (portion of) driver EEPROM cache to device and update checksum */
/* Writes words [start, start+count) from nic->eeprom[] back to the
 * device, then recomputes and writes the checksum word so the sum of
 * all words is again 0xBABA. Rejects ranges that reach the checksum
 * word or run past the end of the EEPROM. */
814 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
816 u16 addr, addr_len = 8, checksum = 0;
818 /* Try reading with an 8-bit addr len to discover actual addr len */
819 e100_eeprom_read(nic, &addr_len, 0);
820 nic->eeprom_wc = 1 << addr_len;
/* range check: must not overlap the checksum (last) word */
822 if (start + count >= nic->eeprom_wc)
825 for (addr = start; addr < start + count; addr++)
826 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
828 /* The checksum, stored in the last word, is calculated such that
829 * the sum of words should be 0xBABA */
830 for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
831 checksum += le16_to_cpu(nic->eeprom[addr]);
832 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
833 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
834 nic->eeprom[nic->eeprom_wc - 1]);
839 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
840 #define E100_WAIT_SCB_FAST 20 /* delay like the old code */
/* Issue a single SCB command to the device, under cmd_lock. Spins
 * (bounded by E100_WAIT_SCB_TIMEOUT) until the previous command has
 * been accepted (cmd_lo reads back 0) before writing the new one.
 * For all commands except cuc_resume the general pointer is loaded
 * with dma_addr first. */
841 static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
847 spin_lock_irqsave(&nic->cmd_lock, flags);
849 /* Previous command is accepted when SCB clears */
850 for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
851 if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
/* after the first E100_WAIT_SCB_FAST fast polls, back off */
854 if (unlikely(i > E100_WAIT_SCB_FAST))
/* loop ran to completion without the SCB clearing: give up */
857 if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
/* cuc_resume continues from the suspend point and takes no pointer */
862 if (unlikely(cmd != cuc_resume))
863 iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
864 iowrite8(cmd, &nic->csr->scb.cmd_lo);
867 spin_unlock_irqrestore(&nic->cmd_lock, flags);
/* Queue a command block on the CBL under cb_lock: take the next free
 * CB (cb_to_use), let cb_prepare fill it in, then chain it to the
 * hardware with suspend-bit handling and kick the CU via
 * e100_exec_cmd(). Used for both Tx (skb != NULL) and non-Tx
 * commands (config, multicast setup, ...). */
872 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
873 int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
879 spin_lock_irqsave(&nic->cb_lock, flags);
/* no free CBs: caller must retry after clean-up replenishes them */
881 if (unlikely(!nic->cbs_avail)) {
887 nic->cb_to_use = cb->next;
891 err = cb_prepare(nic, cb, skb);
895 if (unlikely(!nic->cbs_avail))
899 /* Order is important otherwise we'll be in a race with h/w:
900 * set S-bit in current first, then clear S-bit in previous. */
901 cb->command |= cpu_to_le16(cb_s);
903 cb->prev->command &= cpu_to_le16(~cb_s);
/* push every not-yet-sent CB to the controller */
905 while (nic->cb_to_send != nic->cb_to_use) {
906 if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
907 nic->cb_to_send->dma_addr))) {
908 /* Ok, here's where things get sticky. It's
909 * possible that we can't schedule the command
910 * because the controller is too busy, so
911 * let's just queue the command and try again
912 * when another command is scheduled. */
913 if (err == -ENOSPC) {
/* force a retry via the tx-timeout path */
915 schedule_work(&nic->tx_timeout_task);
/* after the first CB, subsequent ones resume from the suspend point */
919 nic->cuc_cmd = cuc_resume;
920 nic->cb_to_send = nic->cb_to_send->next;
925 spin_unlock_irqrestore(&nic->cb_lock, flags);
/* mii.c callback: read a PHY register through the nic's mdio_ctrl op. */
930 static int mdio_read(struct net_device *netdev, int addr, int reg)
932 struct nic *nic = netdev_priv(netdev);
933 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
/* mii.c callback: write a PHY register through the nic's mdio_ctrl op. */
936 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
938 struct nic *nic = netdev_priv(netdev);
940 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
943 /* the standard mdio_ctrl() function for usual MII-compliant hardware */
/* Performs one MDI read or write via the mdi_ctrl CSR: wait for the
 * Ready bit, issue the operation, wait for Ready again, and return
 * the low 16 bits of the register (the read data for mdi_read).
 * All steps are done under mdio_lock (Stratus87247 workaround). */
944 static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
952 * Stratus87247: we shouldn't be writing the MDI control
953 * register until the Ready bit shows True. Also, since
954 * manipulation of the MDI control registers is a multi-step
955 * procedure it should be done under lock.
957 spin_lock_irqsave(&nic->mdio_lock, flags);
/* bounded poll for the Ready bit before touching mdi_ctrl */
958 for (i = 100; i; --i) {
959 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
964 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
965 spin_unlock_irqrestore(&nic->mdio_lock, flags);
966 return 0; /* No way to indicate timeout error */
/* reg in bits 16-20, PHY addr in bits 21-25, dir selects read/write */
968 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
/* poll for completion; data_out captures the final register value */
970 for (i = 0; i < 100; i++) {
972 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
975 spin_unlock_irqrestore(&nic->mdio_lock, flags);
976 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
977 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
978 dir == mdi_read ? "READ" : "WRITE",
979 addr, reg, data, data_out);
980 return (u16)data_out;
983 /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
/* Wraps mdio_ctrl_hw(): on a BMCR write that (re)starts autoneg,
 * also force the 100Mbps speed/duplex bits matching the current
 * advertisement, to work around a silicon autoneg issue. */
984 static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
990 if ((reg == MII_BMCR) && (dir == mdi_write)) {
991 if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
992 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
996 * Workaround Si issue where sometimes the part will not
997 * autoneg to 100Mbps even when advertised.
999 if (advert & ADVERTISE_100FULL)
1000 data |= BMCR_SPEED100 | BMCR_FULLDPLX;
1001 else if (advert & ADVERTISE_100HALF)
1002 data |= BMCR_SPEED100;
/* delegate the (possibly modified) operation to the standard path */
1005 return mdio_ctrl_hw(nic, addr, dir, reg, data);
1008 /* Fully software-emulated mdio_ctrl() function for cards without
1009 * MII-compliant PHYs.
1010 * For now, this is mainly geared towards 80c24 support; in case of further
1011 * requirements for other types (i82503, ...?) either extend this mechanism
1012 * or split it, whichever is cleaner.
/* Returns hard-coded register values for the few MII registers the
 * mii library needs (BMCR/BMSR/advertisement); any other register or
 * any write falls through to a debug "unimplemented" message.
 * NOTE(review): register selection logic between the visible lines is
 * not shown in this excerpt. */
1014 static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1020 /* might need to allocate a netdev_priv'ed register array eventually
1021 * to be able to record state changes, but for now
1022 * some fully hardcoded register handling ought to be ok I guess. */
1024 if (dir == mdi_read) {
1027 /* Auto-negotiation, right? */
1028 return BMCR_ANENABLE |
/* fabricate a link-up status so mii_link_ok() succeeds */
1031 return BMSR_LSTATUS /* for mii_link_ok() */ |
1035 /* 80c24 is a "combo card" PHY, right? */
1036 return ADVERTISE_10HALF |
1039 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1040 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1041 dir == mdi_read ? "READ" : "WRITE",
1048 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1049 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1050 dir == mdi_read ? "READ" : "WRITE",
/* True unless the nic is using the software MDIO emulation, i.e. the
 * PHY is assumed MII-capable for any other mdio_ctrl implementation. */
1056 static inline int e100_phy_supports_mii(struct nic *nic)
1058 /* for now, just check it by comparing whether we
1059 are using MII software emulation.
1061 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
/* Initialize driver defaults on nic: MAC type from PCI revision,
 * RFD/CB ring parameters, Tx threshold/command template, the blank
 * RFD template used when replenishing the Rx ring, and the mii_if_info
 * callbacks. */
1064 static void e100_get_defaults(struct nic *nic)
1066 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
1067 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
1069 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
1070 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
1071 if (nic->mac == mac_unknown)
1072 nic->mac = mac_82557_D100_A;
1074 nic->params.rfds = rfds;
1075 nic->params.cbs = cbs;
1077 /* Quadwords to DMA into FIFO before starting frame transmit */
1078 nic->tx_threshold = 0xE0;
1080 /* no interrupt for every tx completion, delay = 256us if not 557 */
1081 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
1082 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1084 /* Template for a freshly allocated RFD */
1085 nic->blank_rfd.command = 0;
/* all-ones RBD pointer = "no RBD" in simplified mode */
1086 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
/* buffer sized for a max VLAN frame plus FCS */
1087 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
/* MII library hookup: 5-bit PHY and register addresses */
1090 nic->mii.phy_id_mask = 0x1F;
1091 nic->mii.reg_num_mask = 0x1F;
1092 nic->mii.dev = nic->netdev;
1093 nic->mii.mdio_read = mdio_read;
1094 nic->mii.mdio_write = mdio_write;
/* cb_prepare callback that fills a CB with the device 'configure'
 * command: base register settings first, then adjustments for
 * promiscuous/loopback mode, RXFCS/RXALL features, WoL, and the
 * MAC-generation-specific capabilities (82558 and later). The skb
 * argument is unused for this non-Tx command. */
1097 static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1099 struct config *config = &cb->u.config;
/* byte view of the config block, used by the hex-dump prints below */
1100 u8 *c = (u8 *)config;
1101 struct net_device *netdev = nic->netdev;
1103 cb->command = cpu_to_le16(cb_config);
1105 memset(config, 0, sizeof(struct config));
/* --- baseline configuration, per the 8255x developer's manual --- */
1107 config->byte_count = 0x16; /* bytes in this struct */
1108 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
1109 config->direct_rx_dma = 0x1; /* reserved */
1110 config->standard_tcb = 0x1; /* 1=standard, 0=extended */
1111 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
1112 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
1113 config->tx_underrun_retry = 0x3; /* # of underrun retries */
1114 if (e100_phy_supports_mii(nic))
1115 config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
1116 config->pad10 = 0x6;
1117 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
1118 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
1119 config->ifs = 0x6; /* x16 = inter frame spacing */
1120 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
1121 config->pad15_1 = 0x1;
1122 config->pad15_2 = 0x1;
1123 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
1124 config->fc_delay_hi = 0x40; /* time delay for fc frame */
1125 config->tx_padding = 0x1; /* 1=pad short frames */
1126 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
1127 config->pad18 = 0x1;
1128 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
1129 config->pad20_1 = 0x1F;
1130 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
1131 config->pad21_1 = 0x5;
/* --- per-nic tunables --- */
1133 config->adaptive_ifs = nic->adaptive_ifs;
1134 config->loopback = nic->loopback;
1136 if (nic->mii.force_media && nic->mii.full_duplex)
1137 config->full_duplex_force = 0x1; /* 1=force, 0=auto */
/* promiscuous or loopback: accept everything, including bad frames */
1139 if (nic->flags & promiscuous || nic->loopback) {
1140 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1141 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1142 config->promiscuous_mode = 0x1; /* 1=on, 0=off */
1145 if (unlikely(netdev->features & NETIF_F_RXFCS))
1146 config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */
1148 if (nic->flags & multicast_all)
1149 config->multicast_all = 0x1; /* 1=accept, 0=no */
1151 /* disable WoL when up */
1152 if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
1153 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
/* --- capabilities added by 82558 and later silicon --- */
1155 if (nic->mac >= mac_82558_D101_A4) {
1156 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
1157 config->mwi_enable = 0x1; /* 1=enable, 0=disable */
1158 config->standard_tcb = 0x0; /* 1=standard, 0=extended */
1159 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
1160 if (nic->mac >= mac_82559_D101M) {
1161 config->tno_intr = 0x1; /* TCO stats enable */
1162 /* Enable TCO in extended config */
1163 if (nic->mac >= mac_82551_10) {
1164 config->byte_count = 0x20; /* extended bytes */
1165 config->rx_d102_mode = 0x1; /* GMRC for TCO */
1168 config->standard_stat_counter = 0x0;
/* RXALL: deliver frames the hardware would normally drop */
1172 if (netdev->features & NETIF_F_RXALL) {
1173 config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
1174 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1175 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
/* debug dump of the raw config bytes that will be sent to the device */
1178 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
1180 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
1182 netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
1187 /*************************************************************************
1188 * CPUSaver parameters
1190 * All CPUSaver parameters are 16-bit literals that are part of a
1191 * "move immediate value" instruction. By changing the value of
1192 * the literal in the instruction before the code is loaded, the
1193 * driver can change the algorithm.
1195 * INTDELAY - This loads the dead-man timer with its initial value.
1196 * When this timer expires the interrupt is asserted, and the
1197 * timer is reset each time a new packet is received. (see
1198 * BUNDLEMAX below to set the limit on number of chained packets)
1199 * The current default is 0x600 or 1536. Experiments show that
1200 * the value should probably stay within the 0x200 - 0x1000.
1203 * This sets the maximum number of frames that will be bundled. In
1204 * some situations, such as the TCP windowing algorithm, it may be
1205 * better to limit the growth of the bundle size than let it go as
1206 * high as it can, because that could cause too much added latency.
1207 * The default is six, because this is the number of packets in the
1208 * default TCP window size. A value of 1 would make CPUSaver indicate
1209 * an interrupt for every frame received. If you do not want to put
1210 * a limit on the bundle size, set this value to xFFFF.
1213 * This contains a bit-mask describing the minimum size frame that
1214 * will be bundled. The default masks the lower 7 bits, which means
1215 * that any frame less than 128 bytes in length will not be bundled,
1216 * but will instead immediately generate an interrupt. This does
1217 * not affect the current bundle in any way. Any frame that is 128
1218 * bytes or larger will be bundled normally. This feature is meant
1219 * to provide immediate indication of ACK frames in a TCP environment.
1220 * Customers were seeing poor performance when a machine with CPUSaver
1221 * enabled was sending but not receiving. The delay introduced when
1222 * the ACKs were received was enough to reduce total throughput, because
1223 * the sender would sit idle until the ACK was finally seen.
1225 * The current default is 0xFF80, which masks out the lower 7 bits.
1226 * This means that any frame which is x7F (127) bytes or smaller
1227 * will cause an immediate interrupt. Because this value must be a
1228 * bit mask, there are only a few valid values that can be used. To
1229 * turn this feature off, the driver can write the value xFFFF to the
1230 * lower word of this instruction (in the same way that the other
1231 * parameters are used). Likewise, a value of 0xF800 (2047) would
1232 * cause an interrupt to be generated for every frame, because all
1233 * standard Ethernet frames are <= 2047 bytes in length.
1234 *************************************************************************/
1236 /* if you wish to disable the ucode functionality, while maintaining the
1237 * workarounds it provides, set the following defines to:
/* Tunable CPUSaver literals patched into the microcode image by
 * e100_setup_ucode() (see the large block comment above for semantics
 * and valid ranges of each parameter). */
1242 #define BUNDLESMALL 1
1243 #define BUNDLEMAX (u16)6
1244 #define INTDELAY (u16)1536 /* 0x600 */
1246 /* Initialize firmware */
/* e100_request_firmware - select and validate the microcode blob for this
 * MAC revision.  Returns the firmware on success or an ERR_PTR on failure;
 * ICH parts and MACs with no matching ucode skip loading entirely.
 * NOTE(review): this excerpt elides some original lines (braces, early
 * returns), so the comments below describe only the visible code. */
1247 static const struct firmware *e100_request_firmware(struct nic *nic)
1249 const char *fw_name;
1250 const struct firmware *fw = nic->fw;
1251 u8 timer, bundle, min_size;
1253 bool required = false;
1255 /* do not load u-code for ICH devices */
1256 if (nic->flags & ich)
1259 /* Search for ucode match against h/w revision
1261 * Based on comments in the source code for the FreeBSD fxp
1262 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
1264 * "fixes for bugs in the B-step hardware (specifically, bugs
1265 * with Inline Receive)."
1267 * So we must fail if it cannot be loaded.
1269 * The other microcode files are only required for the optional
1270 * CPUSaver feature. Nice to have, but no reason to fail.
1272 if (nic->mac == mac_82559_D101M) {
1273 fw_name = FIRMWARE_D101M;
1274 } else if (nic->mac == mac_82559_D101S) {
1275 fw_name = FIRMWARE_D101S;
1276 } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
1277 fw_name = FIRMWARE_D102E;
1279 } else { /* No ucode on other devices */
1283 /* If the firmware has not previously been loaded, request a pointer
1284 * to it. If it was previously loaded, we are reinitializing the
1285 * adapter, possibly in a resume from hibernate, in which case
1286 * request_firmware() cannot be used.
1289 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
/* Load failure is fatal only when the ucode is required (D102E);
 * otherwise only CPUSaver is lost and we just log it. */
1293 netif_err(nic, probe, nic->netdev,
1294 "Failed to load firmware \"%s\": %d\n",
1296 return ERR_PTR(err);
1298 netif_info(nic, probe, nic->netdev,
1299 "CPUSaver disabled. Needs \"%s\": %d\n",
1305 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1306 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1307 if (fw->size != UCODE_SIZE * 4 + 3) {
1308 netif_err(nic, probe, nic->netdev,
1309 "Firmware \"%s\" has wrong size %zu\n",
1311 release_firmware(fw);
1312 return ERR_PTR(-EINVAL);
1315 /* Read timer, bundle and min_size from end of firmware blob */
1316 timer = fw->data[UCODE_SIZE * 4];
1317 bundle = fw->data[UCODE_SIZE * 4 + 1];
1318 min_size = fw->data[UCODE_SIZE * 4 + 2];
/* The trailing bytes are word indices into the ucode image; reject
 * out-of-range values before they are used to patch the image. */
1320 if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
1321 min_size >= UCODE_SIZE) {
1322 netif_err(nic, probe, nic->netdev,
1323 "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
1324 fw_name, timer, bundle, min_size);
1325 release_firmware(fw);
1326 return ERR_PTR(-EINVAL);
1329 /* OK, firmware is validated and ready to use. Save a pointer
1330 * to it in the nic */
/* e100_setup_ucode - cb setup callback that copies the validated firmware
 * into the control block and patches in the tunable CPUSaver literals
 * (INTDELAY, BUNDLEMAX, BUNDLESMALL) at the offsets read from the blob. */
1335 static int e100_setup_ucode(struct nic *nic, struct cb *cb,
1336 struct sk_buff *skb)
/* The "skb" argument actually carries the firmware pointer, smuggled
 * through e100_exec_cb() by e100_load_ucode_wait(). */
1338 const struct firmware *fw = (void *)skb;
1339 u8 timer, bundle, min_size;
1341 /* It's not a real skb; we just abused the fact that e100_exec_cb
1342 will pass it through to here... */
1345 /* firmware is stored as little endian already */
1346 memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
1348 /* Read timer, bundle and min_size from end of firmware blob */
1349 timer = fw->data[UCODE_SIZE * 4];
1350 bundle = fw->data[UCODE_SIZE * 4 + 1];
1351 min_size = fw->data[UCODE_SIZE * 4 + 2];
1353 /* Insert user-tunable settings in cb->u.ucode */
/* Each parameter lives in the low 16 bits of a "move immediate"
 * instruction word: mask the low half, then OR in the new literal. */
1354 cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
1355 cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
1356 cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
1357 cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
1358 cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
/* BUNDLESMALL!=0 disables the min-size mask (0xFFFF bundles all sizes);
 * otherwise 0xFF80 makes frames under 128 bytes interrupt immediately. */
1359 cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
1361 cb->command = cpu_to_le16(cb_ucode | cb_el);
/* e100_load_ucode_wait - request the firmware, queue the ucode-load cb,
 * then poll (up to ~500ms) for the hardware to mark it complete.
 * A NULL firmware means no ucode is needed for this device. */
1365 static inline int e100_load_ucode_wait(struct nic *nic)
1367 const struct firmware *fw;
1368 int err = 0, counter = 50;
1369 struct cb *cb = nic->cb_to_clean;
1371 fw = e100_request_firmware(nic);
1372 /* If it's NULL, then no ucode is required */
1373 if (!fw || IS_ERR(fw))
1376 if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
1377 netif_err(nic, probe, nic->netdev,
1378 "ucode cmd failed with error %d\n", err);
1380 /* must restart cuc */
1381 nic->cuc_cmd = cuc_start;
1383 /* wait for completion */
1384 e100_write_flush(nic);
1387 /* wait for possibly (ouch) 500ms */
/* Busy-poll the cb status; counter bounds the wait so a wedged device
 * cannot hang us here forever. */
1388 while (!(cb->status & cpu_to_le16(cb_complete))) {
1390 if (!--counter) break;
1393 /* ack any interrupts, something could have been set */
1394 iowrite8(~0, &nic->csr->scb.stat_ack);
1396 /* if the command failed, or is not OK, notify and return */
1397 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1398 netif_err(nic, probe, nic->netdev, "ucode load failed\n");
/* e100_setup_iaaddr - cb setup callback that programs the individual
 * (unicast) MAC address from netdev->dev_addr into the adapter. */
1405 static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1406 struct sk_buff *skb)
1408 cb->command = cpu_to_le16(cb_iaaddr);
1409 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
/* e100_dump - cb setup callback that points the adapter's dump command at
 * the driver's DMA-mapped dump_buf inside struct mem. */
1413 static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1415 cb->command = cpu_to_le16(cb_dump);
1416 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1417 offsetof(struct mem, dump_buf));
/* e100_phy_check_without_mii - last-resort detection for rare MII-less
 * PHYs, keyed off the PHY interface type stored in the EEPROM.
 * NOTE(review): the default case and return paths are elided in this
 * excerpt; comments describe only the visible code. */
1421 static int e100_phy_check_without_mii(struct nic *nic)
/* PHY type lives in bits 11:8 of the eeprom_phy_iface word. */
1426 phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
1429 case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
1430 case I82503: /* Non-MII PHY; UNTESTED! */
1431 case S80C24: /* Non-MII PHY; tested and working */
1432 /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
1433 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
1434 * doesn't have a programming interface of any sort. The
1435 * media is sensed automatically based on how the link partner
1436 * is configured. This is, in essence, manual configuration.
1438 netif_info(nic, probe, nic->netdev,
1439 "found MII-less i82503 or 80c24 or other PHY\n");
1441 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1442 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
1444 /* these might be needed for certain MII-less cards...
1445 * nic->flags |= ich;
1446 * nic->flags |= ich_10h_workaround; */
/* PHY register bits used by e100_phy_init() below: MDI/MDI-X auto-switch
 * enable, and National Semi congestion-control register/bits. */
1457 #define NCONFIG_AUTO_SWITCH 0x0080
1458 #define MII_NSC_CONG MII_RESV1
1459 #define NSC_CONG_ENABLE 0x0100
1460 #define NSC_CONG_TXREADY 0x0400
1461 #define ADVERTISE_FC_SUPPORTED 0x0400
/* e100_phy_init - discover the PHY address, identify the PHY, isolate all
 * other addresses, and apply per-PHY quirks (NSC congestion control,
 * 82552 flow-control advertisement, MDI/MDI-X auto-switch).
 * NOTE(review): several lines (braces, returns) are elided here. */
1462 static int e100_phy_init(struct nic *nic)
1464 struct net_device *netdev = nic->netdev;
1466 u16 bmcr, stat, id_lo, id_hi, cong;
1468 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
1469 for (addr = 0; addr < 32; addr++) {
1470 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1471 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
/* BMSR is read twice: status bits are latched, so the first read may
 * return stale values. */
1472 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1473 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1474 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1478 /* uhoh, no PHY detected: check whether we seem to be some
1479 * weird, rare variant which is *known* to not have any MII.
1480 * But do this AFTER MII checking only, since this does
1481 * lookup of EEPROM values which may easily be unreliable. */
1482 if (e100_phy_check_without_mii(nic))
1483 return 0; /* simply return and hope for the best */
1485 /* for unknown cases log a fatal error */
1486 netif_err(nic, hw, nic->netdev,
1487 "Failed to locate any known PHY, aborting\n");
1491 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1492 "phy_addr = %d\n", nic->mii.phy_id);
/* Compose the 32-bit PHY ID from the two MII ID registers. */
1495 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1496 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1497 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1498 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1499 "phy ID = 0x%08X\n", nic->phy);
1501 /* Select the phy and isolate the rest */
1502 for (addr = 0; addr < 32; addr++) {
1503 if (addr != nic->mii.phy_id) {
1504 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1505 } else if (nic->phy != phy_82552_v) {
1506 bmcr = mdio_read(netdev, addr, MII_BMCR);
1507 mdio_write(netdev, addr, MII_BMCR,
1508 bmcr & ~BMCR_ISOLATE);
1512 * Workaround for 82552:
1513 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
1514 * other phy_id's) using bmcr value from addr discovery loop above.
1516 if (nic->phy == phy_82552_v)
1517 mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
1518 bmcr & ~BMCR_ISOLATE)
1520 /* Handle National tx phys */
1521 #define NCS_PHY_MODEL_MASK 0xFFF0FFFF
1522 if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1523 /* Disable congestion control */
1524 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1525 cong |= NSC_CONG_TXREADY;
1526 cong &= ~NSC_CONG_ENABLE;
1527 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1530 if (nic->phy == phy_82552_v) {
1531 u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
1533 /* assign special tweaked mdio_ctrl() function */
1534 nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
1536 /* Workaround Si not advertising flow-control during autoneg */
1537 advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1538 mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
1540 /* Reset for the above changes to take effect */
1541 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1543 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
1544 } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1545 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
1546 (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
1547 /* enable/disable MDI/MDI-X auto-switching. */
1548 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1549 nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
/* e100_hw_init - full hardware bring-up sequence: self-test, PHY init,
 * CU/RU base loads, ucode, configure, MAC address, and stats setup.
 * NOTE(review): the error-handling goto labels are elided in this
 * excerpt; each failing step presumably bails out early. */
1555 static int e100_hw_init(struct nic *nic)
1561 netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
/* Self-test sleeps, so it is skipped when called from atomic context. */
1562 if (!in_interrupt() && (err = e100_self_test(nic)))
1565 if ((err = e100_phy_init(nic)))
1567 if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1569 if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1571 if ((err = e100_load_ucode_wait(nic)))
1573 if ((err = e100_exec_cb(nic, NULL, e100_configure)))
1575 if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1577 if ((err = e100_exec_cmd(nic, cuc_dump_addr,
1578 nic->dma_addr + offsetof(struct mem, stats))))
1580 if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1583 e100_disable_irq(nic);
/* e100_multi - cb setup callback that loads the multicast address list,
 * capped at E100_MAX_MULTICAST_ADDRS entries. */
1588 static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1590 struct net_device *netdev = nic->netdev;
1591 struct netdev_hw_addr *ha;
1592 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1594 cb->command = cpu_to_le16(cb_multi);
/* Hardware expects the byte length of the address block, not a count. */
1595 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1597 netdev_for_each_mc_addr(ha, netdev) {
1600 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
/* e100_set_multicast_list - .ndo_set_rx_mode hook: translate netdev flags
 * into nic->flags (promiscuous / multicast_all) and push both a configure
 * and a multicast-list command to the adapter. */
1606 static void e100_set_multicast_list(struct net_device *netdev)
1608 struct nic *nic = netdev_priv(netdev);
1610 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1611 "mc_count=%d, flags=0x%04X\n",
1612 netdev_mc_count(netdev), netdev->flags);
1614 if (netdev->flags & IFF_PROMISC)
1615 nic->flags |= promiscuous;
1617 nic->flags &= ~promiscuous;
/* Fall back to receive-all-multicast when the list exceeds what the
 * hardware multicast command can hold. */
1619 if (netdev->flags & IFF_ALLMULTI ||
1620 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1621 nic->flags |= multicast_all;
1623 nic->flags &= ~multicast_all;
1625 e100_exec_cb(nic, NULL, e100_configure);
1626 e100_exec_cb(nic, NULL, e100_multi);
/* e100_update_stats - fold the adapter's DMA'd statistics dump into
 * net_device_stats and the driver's private counters, then kick off the
 * next cuc_dump_reset so fresh stats are ready for the next call. */
1629 static void e100_update_stats(struct nic *nic)
1631 struct net_device *dev = nic->netdev;
1632 struct net_device_stats *ns = &dev->stats;
1633 struct stats *s = &nic->mem->stats;
/* The "dump complete" marker sits at a different offset depending on
 * MAC generation, because newer MACs append more counters. */
1634 __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1635 (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
1638 /* Device's stats reporting may take several microseconds to
1639 * complete, so we're always waiting for results of the
1640 * previous command. */
1642 if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
1644 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1645 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1646 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1647 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1648 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1649 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1650 ns->collisions += nic->tx_collisions;
1651 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1652 le32_to_cpu(s->tx_lost_crs);
1653 nic->rx_short_frame_errors +=
1654 le32_to_cpu(s->rx_short_frame_errors);
1655 ns->rx_length_errors = nic->rx_short_frame_errors +
1656 nic->rx_over_length_errors;
1657 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1658 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1659 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1660 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1661 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1662 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1663 le32_to_cpu(s->rx_alignment_errors) +
1664 le32_to_cpu(s->rx_short_frame_errors) +
1665 le32_to_cpu(s->rx_cdt_errors);
1666 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1667 nic->tx_single_collisions +=
1668 le32_to_cpu(s->tx_single_collisions);
1669 nic->tx_multiple_collisions +=
1670 le32_to_cpu(s->tx_multiple_collisions);
/* Flow-control counters exist only on 82558+; TCO counters on 82559+. */
1671 if (nic->mac >= mac_82558_D101_A4) {
1672 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1673 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1674 nic->rx_fc_unsupported +=
1675 le32_to_cpu(s->fc_rcv_unsupported);
1676 if (nic->mac >= mac_82559_D101M) {
1677 nic->tx_tco_frames +=
1678 le16_to_cpu(s->xmt_tco_frames);
1679 nic->rx_tco_frames +=
1680 le16_to_cpu(s->rcv_tco_frames);
1686 if (e100_exec_cmd(nic, cuc_dump_reset, 0))
1687 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1688 "exec cuc_dump_reset failed\n");
/* e100_adjust_adaptive_ifs - grow/shrink the inter-frame spacing in steps
 * of 5 (bounded at 60) based on the observed collision rate, and push a
 * reconfigure only when the value actually changed. */
1691 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1693 /* Adjust inter-frame-spacing (IFS) between two transmits if
1694 * we're getting collisions on a half-duplex connection. */
1696 if (duplex == DUPLEX_HALF) {
1697 u32 prev = nic->adaptive_ifs;
/* Threshold scales with link speed: 1000 frames at 100Mbps, 100 at 10. */
1698 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
/* More than 1 collision per 32 frames => back off by widening IFS. */
1700 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1701 (nic->tx_frames > min_frames)) {
1702 if (nic->adaptive_ifs < 60)
1703 nic->adaptive_ifs += 5;
1704 } else if (nic->tx_frames < min_frames) {
1705 if (nic->adaptive_ifs >= 5)
1706 nic->adaptive_ifs -= 5;
1708 if (nic->adaptive_ifs != prev)
1709 e100_exec_cb(nic, NULL, e100_configure);
/* e100_watchdog - periodic timer: report link transitions, fire a SW
 * interrupt to recover from Rx allocation failure, refresh statistics,
 * adjust adaptive IFS, and apply 557/ICH workarounds; then rearms itself. */
1713 static void e100_watchdog(struct timer_list *t)
1715 struct nic *nic = from_timer(nic, t, watchdog);
1716 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1719 netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1720 "right now = %ld\n", jiffies);
1722 /* mii library handles link maintenance tasks */
1724 mii_ethtool_gset(&nic->mii, &cmd);
1725 speed = ethtool_cmd_speed(&cmd);
/* Log link up/down only on a state change relative to the carrier. */
1727 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1728 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
1729 speed == SPEED_100 ? 100 : 10,
1730 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1731 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1732 netdev_info(nic->netdev, "NIC Link is Down\n");
1735 mii_check_link(&nic->mii);
1737 /* Software generated interrupt to recover from (rare) Rx
1738 * allocation failure.
1739 * Unfortunately have to use a spinlock to not re-enable interrupts
1740 * accidentally, due to hardware that shares a register between the
1741 * interrupt mask bit and the SW Interrupt generation bit */
1742 spin_lock_irq(&nic->cmd_lock);
1743 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1744 e100_write_flush(nic);
1745 spin_unlock_irq(&nic->cmd_lock);
1747 e100_update_stats(nic);
1748 e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
1750 if (nic->mac <= mac_82557_D100_C)
1751 /* Issue a multicast command to workaround a 557 lock up */
1752 e100_set_multicast_list(nic->netdev);
1754 if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
1755 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1756 nic->flags |= ich_10h_workaround;
1758 nic->flags &= ~ich_10h_workaround;
/* Re-arm the watchdog, rounded to reduce timer wakeups. */
1760 mod_timer(&nic->watchdog,
1761 round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
/* e100_xmit_prepare - cb setup callback for transmit: DMA-map the skb and
 * fill in the TCB (single TBD pointing at the mapped buffer). */
1764 static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1765 struct sk_buff *skb)
1767 dma_addr_t dma_addr;
1768 cb->command = nic->tx_command;
1770 dma_addr = pci_map_single(nic->pdev,
1771 skb->data, skb->len, PCI_DMA_TODEVICE);
1772 /* If we can't map the skb, have the upper layer try later */
1773 if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
1774 dev_kfree_skb_any(skb);
1780 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
1781 * testing, ie sending frames with bad CRC.
1783 if (unlikely(skb->no_fcs))
1784 cb->command |= cpu_to_le16(cb_tx_nc);
1786 cb->command &= ~cpu_to_le16(cb_tx_nc);
1788 /* interrupt every 16 packets regardless of delay */
/* True when cbs_avail is a multiple of 16 (low 4 bits clear). */
1789 if ((nic->cbs_avail & ~15) == nic->cbs_avail)
1790 cb->command |= cpu_to_le16(cb_i);
1791 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1792 cb->u.tcb.tcb_byte_count = 0;
1793 cb->u.tcb.threshold = nic->tx_threshold;
1794 cb->u.tcb.tbd_count = 1;
1795 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
1796 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1797 skb_tx_timestamp(skb);
/* e100_xmit_frame - .ndo_start_xmit hook: apply the ICH 10/half Tx-hang
 * workaround if needed, then queue the skb via e100_exec_cb(); stops the
 * queue when control blocks run out. */
1801 static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1802 struct net_device *netdev)
1804 struct nic *nic = netdev_priv(netdev);
1807 if (nic->flags & ich_10h_workaround) {
1808 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1809 Issue a NOP command followed by a 1us delay before
1810 issuing the Tx command. */
1811 if (e100_exec_cmd(nic, cuc_nop, 0))
1812 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1813 "exec cuc_nop failed\n")
1817 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1821 /* We queued the skb, but now we're out of space. */
1822 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1823 "No space for CB\n");
1824 netif_stop_queue(netdev);
1827 /* This is a hard error - log it. */
1828 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1829 "Out of Tx resources, returning skb\n");
1830 netif_stop_queue(netdev);
/* TX_BUSY hands the skb back to the stack for a later retry. */
1831 return NETDEV_TX_BUSY;
1834 return NETDEV_TX_OK;
/* e100_tx_clean - reclaim completed transmit control blocks: account the
 * packet, unmap DMA, free the skb, and wake the queue if it was stopped
 * for lack of cbs. */
1837 static int e100_tx_clean(struct nic *nic)
1839 struct net_device *dev = nic->netdev;
1843 spin_lock(&nic->cb_lock);
1845 /* Clean CBs marked complete */
1846 for (cb = nic->cb_to_clean;
1847 cb->status & cpu_to_le16(cb_complete);
1848 cb = nic->cb_to_clean = cb->next) {
1849 dma_rmb(); /* read skb after status */
1850 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
1851 "cb[%d]->status = 0x%04X\n",
1852 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
/* cb->skb is NULL for non-transmit cbs (configure, multicast, ...);
 * only real Tx cbs carry an skb to account and free. */
1855 if (likely(cb->skb != NULL)) {
1856 dev->stats.tx_packets++;
1857 dev->stats.tx_bytes += cb->skb->len;
1859 pci_unmap_single(nic->pdev,
1860 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1861 le16_to_cpu(cb->u.tcb.tbd.size),
1863 dev_kfree_skb_any(cb->skb);
1871 spin_unlock(&nic->cb_lock);
1873 /* Recover from running out of Tx resources in xmit_frame */
1874 if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1875 netif_wake_queue(nic->netdev);
/* e100_clean_cbs - tear down the control-block ring: unmap and free any
 * in-flight skbs, return the cb array to the DMA pool, and reset the ring
 * pointers/state. */
1880 static void e100_clean_cbs(struct nic *nic)
/* Walk until every cb has been reclaimed (avail == total count). */
1883 while (nic->cbs_avail != nic->params.cbs.count) {
1884 struct cb *cb = nic->cb_to_clean;
1886 pci_unmap_single(nic->pdev,
1887 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1888 le16_to_cpu(cb->u.tcb.tbd.size),
1890 dev_kfree_skb(cb->skb);
1892 nic->cb_to_clean = nic->cb_to_clean->next;
1895 pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1899 nic->cuc_cmd = cuc_start;
1900 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
/* e100_alloc_cbs - allocate the control-block ring from the DMA pool and
 * link it into a circular list (both CPU pointers and hardware DMA links). */
1904 static int e100_alloc_cbs(struct nic *nic)
1907 unsigned int i, count = nic->params.cbs.count;
1909 nic->cuc_cmd = cuc_start;
1910 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1913 nic->cbs = pci_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
1914 &nic->cbs_dma_addr);
1918 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
/* CPU-side circular doubly-linked list ... */
1919 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1920 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1922 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
/* ... and the hardware's little-endian DMA link to the next cb. */
1923 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1924 ((i+1) % count) * sizeof(struct cb));
1927 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1928 nic->cbs_avail = count;
/* e100_start_receiver - (re)start the receive unit at the given rx buffer
 * (or the head of the ring when rx is NULL); no-op unless the RU is
 * currently suspended and an RFA exists. */
1933 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1935 if (!nic->rxs) return;
1936 if (RU_SUSPENDED != nic->ru_running) return;
1938 /* handle init time starts */
1939 if (!rx) rx = nic->rxs;
1941 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1943 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1944 nic->ru_running = RU_RUNNING;
/* Buffer size: RFD header plus a max VLAN Ethernet frame plus FCS. */
1948 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
/* e100_rx_alloc_skb - allocate and DMA-map one receive buffer, seed it
 * with the blank RFD template, and link it onto the end of the RFA. */
1949 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1951 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1954 /* Init, and map the RFD. */
1955 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1956 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1957 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1959 if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1960 dev_kfree_skb_any(rx->skb);
1966 /* Link the RFD to end of RFA by linking previous RFD to
1967 * this one. We are safe to touch the previous RFD because
1968 * it is protected by the before last buffer's el bit being set */
1969 if (rx->prev->skb) {
1970 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
/* RFD link fields may be unaligned inside the skb; hence put_unaligned. */
1971 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1972 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1973 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
/* e100_rx_indicate - examine one receive buffer; if the hardware marked
 * it complete, unmap it, trim to the actual frame size, and hand the skb
 * to the stack (or drop it on error/oversize unless RXALL is set).
 * Also detects the RU silently stopping on an el-bit buffer so the caller
 * can restart it. */
1979 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1980 unsigned int *work_done, unsigned int work_to_do)
1982 struct net_device *dev = nic->netdev;
1983 struct sk_buff *skb = rx->skb;
1984 struct rfd *rfd = (struct rfd *)skb->data;
1985 u16 rfd_status, actual_size;
1988 if (unlikely(work_done && *work_done >= work_to_do))
1991 /* Need to sync before taking a peek at cb_complete bit */
1992 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1993 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1994 rfd_status = le16_to_cpu(rfd->status);
1996 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1997 "status=0x%04X\n", rfd_status);
1998 dma_rmb(); /* read size after status bit */
2000 /* If data isn't ready, nothing to indicate */
2001 if (unlikely(!(rfd_status & cb_complete))) {
2002 /* If the next buffer has the el bit, but we think the receiver
2003 * is still running, check to see if it really stopped while
2004 * we had interrupts off.
2005 * This allows for a fast restart without re-enabling
2007 if ((le16_to_cpu(rfd->command) & cb_el) &&
2008 (RU_RUNNING == nic->ru_running))
2010 if (ioread8(&nic->csr->scb.status) & rus_no_res)
2011 nic->ru_running = RU_SUSPENDED;
2012 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2014 PCI_DMA_FROMDEVICE);
2018 /* Get actual data size */
2019 if (unlikely(dev->features & NETIF_F_RXFCS))
/* Hardware reports the byte count in the low 14 bits of actual_size. */
2021 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
2022 if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
2023 actual_size = RFD_BUF_LEN - sizeof(struct rfd);
2026 pci_unmap_single(nic->pdev, rx->dma_addr,
2027 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2029 /* If this buffer has the el bit, but we think the receiver
2030 * is still running, check to see if it really stopped while
2031 * we had interrupts off.
2032 * This allows for a fast restart without re-enabling interrupts.
2033 * This can happen when the RU sees the size change but also sees
2034 * the el bit set. */
2035 if ((le16_to_cpu(rfd->command) & cb_el) &&
2036 (RU_RUNNING == nic->ru_running)) {
2038 if (ioread8(&nic->csr->scb.status) & rus_no_res)
2039 nic->ru_running = RU_SUSPENDED;
2042 /* Pull off the RFD and put the actual data (minus eth hdr) */
2043 skb_reserve(skb, sizeof(struct rfd));
2044 skb_put(skb, actual_size);
2045 skb->protocol = eth_type_trans(skb, nic->netdev);
2047 /* If we are receiving all frames, then don't bother
2048 * checking for errors.
2050 if (unlikely(dev->features & NETIF_F_RXALL)) {
2051 if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
2052 /* Received oversized frame, but keep it. */
2053 nic->rx_over_length_errors++;
2057 if (unlikely(!(rfd_status & cb_ok))) {
2058 /* Don't indicate if hardware indicates errors */
2059 dev_kfree_skb_any(skb);
2060 } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
2061 /* Don't indicate oversized frames */
2062 nic->rx_over_length_errors++;
2063 dev_kfree_skb_any(skb);
2066 dev->stats.rx_packets++;
2067 dev->stats.rx_bytes += (actual_size - fcs_pad);
2068 netif_receive_skb(skb);
/* e100_rx_clean - NAPI-driven receive work: indicate completed buffers up
 * to the budget, refill the ring with fresh skbs, migrate the el-bit
 * "stopping point" forward, and restart the RU if it suspended. */
2078 static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
2079 unsigned int work_to_do)
2082 int restart_required = 0, err = 0;
2083 struct rx *old_before_last_rx, *new_before_last_rx;
2084 struct rfd *old_before_last_rfd, *new_before_last_rfd;
2086 /* Indicate newly arrived packets */
2087 for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
2088 err = e100_rx_indicate(nic, rx, work_done, work_to_do);
2089 /* Hit quota or no more to clean */
2090 if (-EAGAIN == err || -ENODATA == err)
2095 /* On EAGAIN, hit quota so have more work to do, restart once
2096 * cleanup is complete.
2097 * Else, are we already rnr? then pay attention!!! this ensures that
2098 * the state machine progression never allows a start with a
2099 * partially cleaned list, avoiding a race between hardware
2100 * and rx_to_clean when in NAPI mode */
2101 if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
2102 restart_required = 1;
/* "before last" is the buffer carrying the el bit; remember it so the
 * stopping point can be moved after refill. */
2104 old_before_last_rx = nic->rx_to_use->prev->prev;
2105 old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
2107 /* Alloc new skbs to refill list */
2108 for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
2109 if (unlikely(e100_rx_alloc_skb(nic, rx)))
2110 break; /* Better luck next time (see watchdog) */
2113 new_before_last_rx = nic->rx_to_use->prev->prev;
2114 if (new_before_last_rx != old_before_last_rx) {
2115 /* Set the el-bit on the buffer that is before the last buffer.
2116 * This lets us update the next pointer on the last buffer
2117 * without worrying about hardware touching it.
2118 * We set the size to 0 to prevent hardware from touching this
2120 * When the hardware hits the before last buffer with el-bit
2121 * and size of 0, it will RNR interrupt, the RUS will go into
2122 * the No Resources state. It will not complete nor write to
2124 new_before_last_rfd =
2125 (struct rfd *)new_before_last_rx->skb->data;
2126 new_before_last_rfd->size = 0;
2127 new_before_last_rfd->command |= cpu_to_le16(cb_el);
2128 pci_dma_sync_single_for_device(nic->pdev,
2129 new_before_last_rx->dma_addr, sizeof(struct rfd),
2130 PCI_DMA_BIDIRECTIONAL);
2132 /* Now that we have a new stopping point, we can clear the old
2133 * stopping point. We must sync twice to get the proper
2134 * ordering on the hardware side of things. */
2135 old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
2136 pci_dma_sync_single_for_device(nic->pdev,
2137 old_before_last_rx->dma_addr, sizeof(struct rfd),
2138 PCI_DMA_BIDIRECTIONAL);
2139 old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
2141 pci_dma_sync_single_for_device(nic->pdev,
2142 old_before_last_rx->dma_addr, sizeof(struct rfd),
2143 PCI_DMA_BIDIRECTIONAL);
2146 if (restart_required) {
/* Ack the RNR condition before restarting so it doesn't re-fire. */
2148 iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
2149 e100_start_receiver(nic, nic->rx_to_clean);
/* e100_rx_clean_list - free the whole receive ring: unmap and free every
 * skb, then reset the ring pointers and RU state. */
2155 static void e100_rx_clean_list(struct nic *nic)
2158 unsigned int i, count = nic->params.rfds.count;
2160 nic->ru_running = RU_UNINITIALIZED;
2163 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2165 pci_unmap_single(nic->pdev, rx->dma_addr,
2166 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2167 dev_kfree_skb(rx->skb);
2174 nic->rx_to_use = nic->rx_to_clean = NULL;
/* e100_rx_alloc_list - build the circular receive ring: allocate the rx
 * descriptor array, populate every slot with a mapped skb, and establish
 * the el-bit stopping point on the before-last buffer. */
2177 static int e100_rx_alloc_list(struct nic *nic)
2180 unsigned int i, count = nic->params.rfds.count;
2181 struct rfd *before_last;
2183 nic->rx_to_use = nic->rx_to_clean = NULL;
2184 nic->ru_running = RU_UNINITIALIZED;
2186 if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
2189 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2190 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2191 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
/* Any allocation failure unwinds the partially built ring. */
2192 if (e100_rx_alloc_skb(nic, rx)) {
2193 e100_rx_clean_list(nic);
2197 /* Set the el-bit on the buffer that is before the last buffer.
2198 * This lets us update the next pointer on the last buffer without
2199 * worrying about hardware touching it.
2200 * We set the size to 0 to prevent hardware from touching this buffer.
2201 * When the hardware hits the before last buffer with el-bit and size
2202 * of 0, it will RNR interrupt, the RU will go into the No Resources
2203 * state. It will not complete nor write to this buffer. */
2204 rx = nic->rxs->prev->prev;
2205 before_last = (struct rfd *)rx->skb->data;
2206 before_last->command |= cpu_to_le16(cb_el);
2207 before_last->size = 0;
2208 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2209 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2211 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2212 nic->ru_running = RU_SUSPENDED;
/* e100_intr - interrupt handler: ack the status bits, note an RNR
 * condition for later RU restart, and schedule NAPI with interrupts
 * disabled. */
2217 static irqreturn_t e100_intr(int irq, void *dev_id)
2219 struct net_device *netdev = dev_id;
2220 struct nic *nic = netdev_priv(netdev);
2221 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2223 netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
2224 "stat_ack = 0x%02X\n", stat_ack);
2226 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
2227 stat_ack == stat_ack_not_present) /* Hardware is ejected */
2230 /* Ack interrupt(s) */
2231 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
2233 /* We hit Receive No Resource (RNR); restart RU after cleaning */
2234 if (stat_ack & stat_ack_rnr)
2235 nic->ru_running = RU_SUSPENDED;
/* Disable irqs only after napi_schedule_prep() wins, to pair with the
 * re-enable in e100_poll(). */
2237 if (likely(napi_schedule_prep(&nic->napi))) {
2238 e100_disable_irq(nic);
2239 __napi_schedule(&nic->napi);
/* e100_poll - NAPI poll: run the receive cleanup up to the budget; when
 * the budget is not exhausted, complete NAPI and re-enable interrupts. */
2245 static int e100_poll(struct napi_struct *napi, int budget)
2247 struct nic *nic = container_of(napi, struct nic, napi);
2248 unsigned int work_done = 0;
2250 e100_rx_clean(nic, &work_done, budget);
2253 /* If budget not fully consumed, exit the polling mode */
2254 if (work_done < budget) {
2255 napi_complete_done(napi, work_done);
2256 e100_enable_irq(nic);
/* Netpoll hook (netconsole/kgdboe): invoke the interrupt handler
 * directly with the hardware IRQ masked, then unmask it. */
2262 #ifdef CONFIG_NET_POLL_CONTROLLER
2263 static void e100_netpoll(struct net_device *netdev)
2265 struct nic *nic = netdev_priv(netdev);
2267 e100_disable_irq(nic);
2268 e100_intr(nic->pdev->irq, netdev);
2270 e100_enable_irq(nic);
/* ndo_set_mac_address: validate the requested address, copy it into
 * dev_addr, and push it to hardware via an Individual Address setup
 * command block. */
2274 static int e100_set_mac_address(struct net_device *netdev, void *p)
2276 struct nic *nic = netdev_priv(netdev);
2277 struct sockaddr *addr = p;
2279 if (!is_valid_ether_addr(addr->sa_data))
2280 return -EADDRNOTAVAIL;
2282 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2283 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
/* Returns non-zero when ASF (Alert Standard Format) management is
 * enabled: device ID in the 0x1050..0x1057 range, the EEPROM ASF bit
 * set, GCL clear, and an SMBus address other than 0xFE. */
2288 static int e100_asf(struct nic *nic)
2290 /* ASF can be enabled from eeprom */
2291 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2292 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2293 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2294 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
/* Bring the interface up: allocate RX list and command blocks, init
 * hardware, program the RX filter, start the receiver and watchdog,
 * request the (shared) IRQ, then wake the queue and enable NAPI.
 * Errors unwind in reverse order via the goto labels at the bottom.
 * NOTE(review): extract is missing interior lines (goto targets,
 * labels, returns); treat the error-unwind order shown as partial. */
2297 static int e100_up(struct nic *nic)
2301 if ((err = e100_rx_alloc_list(nic)))
2303 if ((err = e100_alloc_cbs(nic)))
2304 goto err_rx_clean_list;
2305 if ((err = e100_hw_init(nic)))
2307 e100_set_multicast_list(nic->netdev);
2308 e100_start_receiver(nic, NULL);
2309 mod_timer(&nic->watchdog, jiffies);
2310 if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
2311 nic->netdev->name, nic->netdev)))
2313 netif_wake_queue(nic->netdev);
2314 napi_enable(&nic->napi);
2315 /* enable ints _after_ enabling poll, preventing a race between
2316 * disable ints+schedule */
2317 e100_enable_irq(nic);
/* Error unwind: stop watchdog, free command blocks, free RX list. */
2321 del_timer_sync(&nic->watchdog);
2323 e100_clean_cbs(nic);
2325 e100_rx_clean_list(nic);
/* Tear down in the reverse order of e100_up: stop NAPI (waits for any
 * in-flight poll), stop the TX queue, release the IRQ and watchdog,
 * drop carrier, and free the command blocks and RX list. */
2329 static void e100_down(struct nic *nic)
2331 /* wait here for poll to complete */
2332 napi_disable(&nic->napi);
2333 netif_stop_queue(nic->netdev);
2335 free_irq(nic->pdev->irq, nic->netdev);
2336 del_timer_sync(&nic->watchdog);
2337 netif_carrier_off(nic->netdev);
2338 e100_clean_cbs(nic);
2339 e100_rx_clean_list(nic);
/* ndo_tx_timeout: runs in softirq context, so defer the actual reset
 * (which calls request_irq and may sleep) to a workqueue task. */
2342 static void e100_tx_timeout(struct net_device *netdev)
2344 struct nic *nic = netdev_priv(netdev);
2346 /* Reset outside of interrupt context, to avoid request_irq
2347 * in interrupt context */
2348 schedule_work(&nic->tx_timeout_task);
/* Workqueue half of the TX-timeout handling: log the SCB status and
 * bounce the interface (full down/up) if it is still running. */
2351 static void e100_tx_timeout_task(struct work_struct *work)
2353 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2354 struct net_device *netdev = nic->netdev;
2356 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2357 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2360 if (netif_running(netdev)) {
2361 e100_down(netdev_priv(netdev));
2362 e100_up(netdev_priv(netdev));
/* Internal MAC/PHY loopback self-test: send one 0xFF-filled frame in
 * loopback mode and compare the received copy byte-for-byte against
 * what was transmitted.
 * NOTE(review): extract is missing interior lines (error labels,
 * msleep/settle delays, returns); the cleanup order shown is partial. */
2367 static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2370 struct sk_buff *skb;
2372 /* Use driver resources to perform internal MAC or PHY
2373 * loopback test. A single packet is prepared and transmitted
2374 * in loopback mode, and the test passes if the received
2375 * packet compares byte-for-byte to the transmitted packet. */
2377 if ((err = e100_rx_alloc_list(nic)))
2379 if ((err = e100_alloc_cbs(nic)))
2382 /* ICH PHY loopback is broken so do MAC loopback instead */
2383 if (nic->flags & ich && loopback_mode == lb_phy)
2384 loopback_mode = lb_mac;
2386 nic->loopback = loopback_mode;
2387 if ((err = e100_hw_init(nic)))
2388 goto err_loopback_none;
2390 if (loopback_mode == lb_phy)
2391 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2394 e100_start_receiver(nic, NULL);
2396 if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2398 goto err_loopback_none;
2400 skb_put(skb, ETH_DATA_LEN);
2401 memset(skb->data, 0xFF, ETH_DATA_LEN);
2402 e100_xmit_frame(skb, nic->netdev);
/* Sync the RX buffer back to the CPU before inspecting its payload,
 * which follows the receive frame descriptor in the buffer. */
2406 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2407 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2409 if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2410 skb->data, ETH_DATA_LEN))
/* Cleanup: take the PHY out of loopback and free resources. */
2414 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2415 nic->loopback = lb_none;
2416 e100_clean_cbs(nic);
2419 e100_rx_clean_list(nic);
/* PHY LED control registers/values used by e100_set_phys_id below.
 * The 82552 PHY uses a different override register than the 8255x. */
2423 #define MII_LED_CONTROL 0x1B
2424 #define E100_82552_LED_OVERRIDE 0x19
2425 #define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
2426 #define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
/* ethtool get_link_ksettings: delegate to the generic MII helper. */
2428 static int e100_get_link_ksettings(struct net_device *netdev,
2429 struct ethtool_link_ksettings *cmd)
2431 struct nic *nic = netdev_priv(netdev);
2433 mii_ethtool_get_link_ksettings(&nic->mii, cmd);
/* ethtool set_link_ksettings: reset the PHY first, apply the new MII
 * settings, then reconfigure the MAC to match. */
2438 static int e100_set_link_ksettings(struct net_device *netdev,
2439 const struct ethtool_link_ksettings *cmd)
2441 struct nic *nic = netdev_priv(netdev);
2444 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2445 err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
2446 e100_exec_cb(nic, NULL, e100_configure);
/* ethtool get_drvinfo: report driver name, version, and PCI bus id. */
2451 static void e100_get_drvinfo(struct net_device *netdev,
2452 struct ethtool_drvinfo *info)
2454 struct nic *nic = netdev_priv(netdev);
2455 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2456 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2457 strlcpy(info->bus_info, pci_name(nic->pdev),
2458 sizeof(info->bus_info));
/* Register-dump length: one SCB word + E100_PHY_REGS MII registers +
 * the device's internal dump buffer (see e100_get_regs). */
2461 #define E100_PHY_REGS 0x1C
2462 static int e100_get_regs_len(struct net_device *netdev)
2464 struct nic *nic = netdev_priv(netdev);
2465 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
/* ethtool get_regs: pack the SCB command/status bytes into buff[0],
 * read the PHY registers (highest first), then trigger a hardware
 * statistics dump command and copy its buffer out.
 * NOTE(review): extract is missing interior lines (declarations of
 * buff/i and the settle delay before reading dump_buf). */
2468 static void e100_get_regs(struct net_device *netdev,
2469 struct ethtool_regs *regs, void *p)
2471 struct nic *nic = netdev_priv(netdev);
2475 regs->version = (1 << 24) | nic->pdev->revision;
2476 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2477 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2478 ioread16(&nic->csr->scb.status);
2479 for (i = E100_PHY_REGS; i >= 0; i--)
2480 buff[1 + E100_PHY_REGS - i] =
2481 mdio_read(netdev, nic->mii.phy_id, i);
2482 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2483 e100_exec_cb(nic, NULL, e100_dump);
2485 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2486 sizeof(nic->mem->dump_buf));
/* ethtool get_wol: magic-packet wake is only supported on 82558
 * D101 A4 and newer MACs; report current state from nic->flags. */
2489 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2491 struct nic *nic = netdev_priv(netdev);
2492 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2493 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
/* ethtool set_wol: accept only WAKE_MAGIC (or none), update the flag,
 * record wakeup capability with the PM core, and reconfigure the MAC.
 * NOTE(review): extract is missing the error return and the if/else
 * structure around the flag update. */
2496 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2498 struct nic *nic = netdev_priv(netdev);
2500 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2501 !device_can_wakeup(&nic->pdev->dev))
2505 nic->flags |= wol_magic;
2507 nic->flags &= ~wol_magic;
2509 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2511 e100_exec_cb(nic, NULL, e100_configure);
/* Trivial ethtool accessors: message level get/set, autoneg restart,
 * link state, and EEPROM byte length (word count << 1). */
2516 static u32 e100_get_msglevel(struct net_device *netdev)
2518 struct nic *nic = netdev_priv(netdev);
2519 return nic->msg_enable;
2522 static void e100_set_msglevel(struct net_device *netdev, u32 value)
2524 struct nic *nic = netdev_priv(netdev);
2525 nic->msg_enable = value;
2528 static int e100_nway_reset(struct net_device *netdev)
2530 struct nic *nic = netdev_priv(netdev);
2531 return mii_nway_restart(&nic->mii);
2534 static u32 e100_get_link(struct net_device *netdev)
2536 struct nic *nic = netdev_priv(netdev);
2537 return mii_link_ok(&nic->mii);
2540 static int e100_get_eeprom_len(struct net_device *netdev)
2542 struct nic *nic = netdev_priv(netdev);
/* eeprom_wc is a word count; EEPROM length is reported in bytes. */
2543 return nic->eeprom_wc << 1;
/* ethtool get_eeprom: copy the requested byte range out of the cached
 * EEPROM image and tag it with the driver's magic. */
2546 #define E100_EEPROM_MAGIC 0x1234
2547 static int e100_get_eeprom(struct net_device *netdev,
2548 struct ethtool_eeprom *eeprom, u8 *bytes)
2550 struct nic *nic = netdev_priv(netdev);
2552 eeprom->magic = E100_EEPROM_MAGIC;
2553 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
/* ethtool set_eeprom: verify the magic, patch the cached image, then
 * write back the affected word range (offset/len are bytes; the save
 * helper works in 16-bit words, hence the >> 1 and the +1 to cover a
 * range that straddles a word boundary). */
2558 static int e100_set_eeprom(struct net_device *netdev,
2559 struct ethtool_eeprom *eeprom, u8 *bytes)
2561 struct nic *nic = netdev_priv(netdev);
2563 if (eeprom->magic != E100_EEPROM_MAGIC)
2566 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2568 return e100_eeprom_save(nic, eeprom->offset >> 1,
2569 (eeprom->len >> 1) + 1);
/* ethtool get_ringparam: report RFD (rx) and CB (tx) ring limits and
 * current counts from the driver's parameter ranges. */
2572 static void e100_get_ringparam(struct net_device *netdev,
2573 struct ethtool_ringparam *ring)
2575 struct nic *nic = netdev_priv(netdev);
2576 struct param_range *rfds = &nic->params.rfds;
2577 struct param_range *cbs = &nic->params.cbs;
2579 ring->rx_max_pending = rfds->max;
2580 ring->tx_max_pending = cbs->max;
2581 ring->rx_pending = rfds->count;
2582 ring->tx_pending = cbs->count;
/* ethtool set_ringparam: reject mini/jumbo rings (unsupported), clamp
 * the requested counts into [min, max], and bounce the interface if it
 * is running so the new sizes take effect.
 * NOTE(review): extract is missing the e100_down/e100_up calls inside
 * the two netif_running branches and the returns. */
2585 static int e100_set_ringparam(struct net_device *netdev,
2586 struct ethtool_ringparam *ring)
2588 struct nic *nic = netdev_priv(netdev);
2589 struct param_range *rfds = &nic->params.rfds;
2590 struct param_range *cbs = &nic->params.cbs;
2592 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2595 if (netif_running(netdev))
2597 rfds->count = max(ring->rx_pending, rfds->min);
2598 rfds->count = min(rfds->count, rfds->max);
2599 cbs->count = max(ring->tx_pending, cbs->min);
2600 cbs->count = min(cbs->count, cbs->max);
2601 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2602 rfds->count, cbs->count);
2603 if (netif_running(netdev))
/* Names for the ethtool self-test results, in the same order that
 * e100_diag_test fills data[0..4]. */
2609 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2610 "Link test (on/offline)",
2611 "Eeprom test (on/offline)",
2612 "Self test (offline)",
2613 "Mac loopback (offline)",
2614 "Phy loopback (offline)",
2616 #define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
/* ethtool self_test: always run the link and EEPROM checks; for an
 * offline test also run the hardware self-test and both loopback
 * tests, bouncing the interface and saving/restoring the MII settings
 * around them. Any non-zero data[] entry marks the test failed.
 * NOTE(review): extract is missing interior lines (declarations of
 * err/i and the e100_down/e100_up calls in the running branches). */
2618 static void e100_diag_test(struct net_device *netdev,
2619 struct ethtool_test *test, u64 *data)
2621 struct ethtool_cmd cmd;
2622 struct nic *nic = netdev_priv(netdev);
2625 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2626 data[0] = !mii_link_ok(&nic->mii);
2627 data[1] = e100_eeprom_load(nic);
2628 if (test->flags & ETH_TEST_FL_OFFLINE) {
2630 /* save speed, duplex & autoneg settings */
2631 err = mii_ethtool_gset(&nic->mii, &cmd);
2633 if (netif_running(netdev))
2635 data[2] = e100_self_test(nic);
2636 data[3] = e100_loopback_test(nic, lb_mac);
2637 data[4] = e100_loopback_test(nic, lb_phy);
2639 /* restore speed, duplex & autoneg settings */
2640 err = mii_ethtool_sset(&nic->mii, &cmd);
2642 if (netif_running(netdev))
2645 for (i = 0; i < E100_TEST_LEN; i++)
2646 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
/* Leave the LEDs/test state visible briefly before returning. */
2648 msleep_interruptible(4 * 1000);
/* ethtool set_phys_id: blink the port LEDs for identification by
 * writing the PHY LED register; the 82552 PHY uses a different
 * override register and on/off values than older parts.
 * NOTE(review): extract is missing interior lines (the leds variable
 * declaration, switch braces, break/return statements, and the
 * restore value for ETHTOOL_ID_INACTIVE). */
2651 static int e100_set_phys_id(struct net_device *netdev,
2652 enum ethtool_phys_id_state state)
2654 struct nic *nic = netdev_priv(netdev);
2661 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2666 case ETHTOOL_ID_ACTIVE:
2670 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
2671 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2674 case ETHTOOL_ID_OFF:
2675 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
2678 case ETHTOOL_ID_INACTIVE:
2682 mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
/* Statistic names for ethtool -S: first E100_NET_STATS_LEN entries
 * mirror struct net_device_stats in order, followed by the
 * device-specific counters filled in by e100_get_ethtool_stats. */
2686 static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2687 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2688 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2689 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2690 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2691 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2692 "tx_heartbeat_errors", "tx_window_errors",
2693 /* device-specific stats */
2694 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2695 "tx_flow_control_pause", "rx_flow_control_pause",
2696 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2697 "rx_short_frame_errors", "rx_over_length_errors",
2699 #define E100_NET_STATS_LEN 21
2700 #define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
/* ethtool get_sset_count: string-set sizes for tests and stats.
 * NOTE(review): extract is missing the switch statement and the
 * default -EOPNOTSUPP return around these two cases. */
2702 static int e100_get_sset_count(struct net_device *netdev, int sset)
2706 return E100_TEST_LEN;
2708 return E100_STATS_LEN;
/* ethtool get_ethtool_stats: copy the generic netdev stats verbatim,
 * then append the device-specific counters in the same order as
 * e100_gstrings_stats above. */
2714 static void e100_get_ethtool_stats(struct net_device *netdev,
2715 struct ethtool_stats *stats, u64 *data)
2717 struct nic *nic = netdev_priv(netdev);
2720 for (i = 0; i < E100_NET_STATS_LEN; i++)
2721 data[i] = ((unsigned long *)&netdev->stats)[i];
2723 data[i++] = nic->tx_deferred;
2724 data[i++] = nic->tx_single_collisions;
2725 data[i++] = nic->tx_multiple_collisions;
2726 data[i++] = nic->tx_fc_pause;
2727 data[i++] = nic->rx_fc_pause;
2728 data[i++] = nic->rx_fc_unsupported;
2729 data[i++] = nic->tx_tco_frames;
2730 data[i++] = nic->rx_tco_frames;
2731 data[i++] = nic->rx_short_frame_errors;
2732 data[i++] = nic->rx_over_length_errors;
/* ethtool get_strings: copy the appropriate name table for the
 * requested string set (test names or stat names).
 * NOTE(review): extract is missing the case labels/breaks. */
2735 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2737 switch (stringset) {
2739 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2742 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
/* ethtool operations table wiring the handlers defined above. */
2747 static const struct ethtool_ops e100_ethtool_ops = {
2748 .get_drvinfo = e100_get_drvinfo,
2749 .get_regs_len = e100_get_regs_len,
2750 .get_regs = e100_get_regs,
2751 .get_wol = e100_get_wol,
2752 .set_wol = e100_set_wol,
2753 .get_msglevel = e100_get_msglevel,
2754 .set_msglevel = e100_set_msglevel,
2755 .nway_reset = e100_nway_reset,
2756 .get_link = e100_get_link,
2757 .get_eeprom_len = e100_get_eeprom_len,
2758 .get_eeprom = e100_get_eeprom,
2759 .set_eeprom = e100_set_eeprom,
2760 .get_ringparam = e100_get_ringparam,
2761 .set_ringparam = e100_set_ringparam,
2762 .self_test = e100_diag_test,
2763 .get_strings = e100_get_strings,
2764 .set_phys_id = e100_set_phys_id,
2765 .get_ethtool_stats = e100_get_ethtool_stats,
2766 .get_sset_count = e100_get_sset_count,
2767 .get_ts_info = ethtool_op_get_ts_info,
2768 .get_link_ksettings = e100_get_link_ksettings,
2769 .set_link_ksettings = e100_set_link_ksettings,
/* ndo_do_ioctl: forward MII ioctls to the generic MII helper. */
2772 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2774 struct nic *nic = netdev_priv(netdev);
2776 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
/* Allocate/free the coherent DMA block (struct mem) shared with the
 * device for self-test results and statistics dumps. */
2779 static int e100_alloc(struct nic *nic)
2781 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2783 return nic->mem ? 0 : -ENOMEM;
2786 static void e100_free(struct nic *nic)
2789 pci_free_consistent(nic->pdev, sizeof(struct mem),
2790 nic->mem, nic->dma_addr);
/* ndo_open/ndo_stop: carrier starts off until the watchdog detects
 * link; e100_up/e100_down do the real work. */
2795 static int e100_open(struct net_device *netdev)
2797 struct nic *nic = netdev_priv(netdev);
2800 netif_carrier_off(netdev);
2801 if ((err = e100_up(nic)))
2802 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2806 static int e100_close(struct net_device *netdev)
2808 e100_down(netdev_priv(netdev));
/* ndo_set_features: only RXFCS/RXALL changes require action; commit
 * the new feature set and reconfigure the MAC. */
2812 static int e100_set_features(struct net_device *netdev,
2813 netdev_features_t features)
2815 struct nic *nic = netdev_priv(netdev);
2816 netdev_features_t changed = features ^ netdev->features;
2818 if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
2821 netdev->features = features;
2822 e100_exec_cb(nic, NULL, e100_configure);
/* net_device operations table for the e100 interface. */
2826 static const struct net_device_ops e100_netdev_ops = {
2827 .ndo_open = e100_open,
2828 .ndo_stop = e100_close,
2829 .ndo_start_xmit = e100_xmit_frame,
2830 .ndo_validate_addr = eth_validate_addr,
2831 .ndo_set_rx_mode = e100_set_multicast_list,
2832 .ndo_set_mac_address = e100_set_mac_address,
2833 .ndo_do_ioctl = e100_do_ioctl,
2834 .ndo_tx_timeout = e100_tx_timeout,
2835 #ifdef CONFIG_NET_POLL_CONTROLLER
2836 .ndo_poll_controller = e100_netpoll,
2838 .ndo_set_features = e100_set_features,
/* PCI probe: allocate the netdev, enable and map the PCI device, load
 * defaults and the EEPROM-provided MAC address, register the netdev,
 * and create the command-block DMA pool. Errors unwind through the
 * err_out_* labels in reverse order of acquisition.
 * NOTE(review): extract is missing many interior lines (declarations,
 * hw_reset call, several goto targets/labels, returns); the unwind
 * chain shown at the bottom is partial. */
2841 static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2843 struct net_device *netdev;
2847 if (!(netdev = alloc_etherdev(sizeof(struct nic))))
2850 netdev->hw_features |= NETIF_F_RXFCS;
2851 netdev->priv_flags |= IFF_SUPP_NOFCS;
2852 netdev->hw_features |= NETIF_F_RXALL;
2854 netdev->netdev_ops = &e100_netdev_ops;
2855 netdev->ethtool_ops = &e100_ethtool_ops;
2856 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2857 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2859 nic = netdev_priv(netdev);
2860 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2861 nic->netdev = netdev;
2863 nic->msg_enable = (1 << debug) - 1;
2864 nic->mdio_ctrl = mdio_ctrl_hw;
2865 pci_set_drvdata(pdev, netdev);
2867 if ((err = pci_enable_device(pdev))) {
2868 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
2869 goto err_out_free_dev;
2872 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2873 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
2875 goto err_out_disable_pdev;
2878 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2879 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
2880 goto err_out_disable_pdev;
2883 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2884 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
2885 goto err_out_free_res;
2888 SET_NETDEV_DEV(netdev, &pdev->dev);
2891 netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
/* BAR 1 is I/O space, BAR 0 is memory space for the CSRs. */
2893 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2895 netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
2897 goto err_out_free_res;
2900 if (ent->driver_data)
2905 e100_get_defaults(nic);
2907 /* D100 MAC doesn't allow rx of vlan packets with normal MTU */
2908 if (nic->mac < mac_82558_D101_A4)
2909 netdev->features |= NETIF_F_VLAN_CHALLENGED;
2911 /* locks must be initialized before calling hw_reset */
2912 spin_lock_init(&nic->cb_lock);
2913 spin_lock_init(&nic->cmd_lock);
2914 spin_lock_init(&nic->mdio_lock);
2916 /* Reset the device before pci_set_master() in case device is in some
2917 * funky state and has an interrupt pending - hint: we don't have the
2918 * interrupt handler registered yet. */
2921 pci_set_master(pdev);
2923 timer_setup(&nic->watchdog, e100_watchdog, 0);
2925 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2927 if ((err = e100_alloc(nic))) {
2928 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
2929 goto err_out_iounmap;
2932 if ((err = e100_eeprom_load(nic)))
2937 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2938 if (!is_valid_ether_addr(netdev->dev_addr)) {
2939 if (!eeprom_bad_csum_allow) {
2940 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
2944 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
2948 /* Wol magic packet can be enabled from eeprom */
2949 if ((nic->mac >= mac_82558_D101_A4) &&
2950 (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
2951 nic->flags |= wol_magic;
2952 device_set_wakeup_enable(&pdev->dev, true);
2955 /* ack any pending wake events, disable PME */
2956 pci_pme_active(pdev, false);
2958 strcpy(netdev->name, "eth%d");
2959 if ((err = register_netdev(netdev))) {
2960 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
2963 nic->cbs_pool = pci_pool_create(netdev->name,
2965 nic->params.cbs.max * sizeof(struct cb),
2968 if (!nic->cbs_pool) {
2969 netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
2973 netif_info(nic, probe, nic->netdev,
2974 "addr 0x%llx, irq %d, MAC addr %pM\n",
2975 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2976 pdev->irq, netdev->dev_addr);
/* Error unwind (partial in this extract). */
2981 unregister_netdev(netdev);
2985 pci_iounmap(pdev, nic->csr);
2987 pci_release_regions(pdev);
2988 err_out_disable_pdev:
2989 pci_disable_device(pdev);
2991 free_netdev(netdev);
/* PCI remove: unregister the netdev and release all probe-time
 * resources (iomap, CB pool, netdev, regions, device enable). */
2995 static void e100_remove(struct pci_dev *pdev)
2997 struct net_device *netdev = pci_get_drvdata(pdev);
3000 struct nic *nic = netdev_priv(netdev);
3001 unregister_netdev(netdev);
3003 pci_iounmap(pdev, nic->csr);
3004 pci_pool_destroy(nic->cbs_pool);
3005 free_netdev(netdev);
3006 pci_release_regions(pdev);
3007 pci_disable_device(pdev);
/* Common suspend/shutdown path: quiesce the interface, save PCI state,
 * and decide whether wake should stay armed (WoL magic packet or ASF
 * enabled). On an 82552 PHY, also turn on reverse auto-negotiation so
 * the link can renegotiate while the host sleeps.
 * NOTE(review): extract is missing interior lines (the e100_down call
 * under netif_running and the if/else around *enable_wake). */
3011 #define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
3012 #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
3013 #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
3014 static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
3016 struct net_device *netdev = pci_get_drvdata(pdev);
3017 struct nic *nic = netdev_priv(netdev);
3019 if (netif_running(netdev))
3021 netif_device_detach(netdev);
3023 pci_save_state(pdev);
3025 if ((nic->flags & wol_magic) | e100_asf(nic)) {
3026 /* enable reverse auto-negotiation */
3027 if (nic->phy == phy_82552_v) {
3028 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3029 E100_82552_SMARTSPEED);
3031 mdio_write(netdev, nic->mii.phy_id,
3032 E100_82552_SMARTSPEED, smartspeed |
3033 E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
3035 *enable_wake = true;
3037 *enable_wake = false;
3040 pci_clear_master(pdev);
/* Power the device down: if wake is wanted, let the PCI core pick the
 * wake-capable sleep state; otherwise disarm wake and force D3hot.
 * NOTE(review): extract is missing the if (wake) line and the final
 * return. */
3043 static int __e100_power_off(struct pci_dev *pdev, bool wake)
3046 return pci_prepare_to_sleep(pdev);
3048 pci_wake_from_d3(pdev, false);
3049 pci_set_power_state(pdev, PCI_D3hot);
/* Legacy PCI suspend hook: shut down, then power off with the wake
 * decision made by __e100_shutdown. */
3055 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
3058 __e100_shutdown(pdev, &wake);
3059 return __e100_power_off(pdev, wake);
/* Legacy PCI resume hook: restore power/config state, disable PME,
 * undo the 82552 reverse auto-negotiation set at suspend, reattach the
 * device and bring it back up if it was running.
 * NOTE(review): extract is missing the e100_up call and return. */
3062 static int e100_resume(struct pci_dev *pdev)
3064 struct net_device *netdev = pci_get_drvdata(pdev);
3065 struct nic *nic = netdev_priv(netdev);
3067 pci_set_power_state(pdev, PCI_D0);
3068 pci_restore_state(pdev);
3069 /* ack any pending wake events, disable PME */
3070 pci_enable_wake(pdev, PCI_D0, 0);
3072 /* disable reverse auto-negotiation */
3073 if (nic->phy == phy_82552_v) {
3074 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3075 E100_82552_SMARTSPEED);
3077 mdio_write(netdev, nic->mii.phy_id,
3078 E100_82552_SMARTSPEED,
3079 smartspeed & ~(E100_82552_REV_ANEG));
3082 netif_device_attach(netdev);
3083 if (netif_running(netdev))
3088 #endif /* CONFIG_PM */
/* System shutdown hook: quiesce the device; only transition to a
 * low-power state when the machine is actually powering off. */
3090 static void e100_shutdown(struct pci_dev *pdev)
3093 __e100_shutdown(pdev, &wake);
3094 if (system_state == SYSTEM_POWER_OFF)
3095 __e100_power_off(pdev, wake);
3098 /* ------------------ PCI Error Recovery infrastructure -------------- */
3100 * e100_io_error_detected - called when PCI error is detected.
3101 * @pdev: Pointer to PCI device
3102 * @state: The current pci connection state
/* Detach the netdev from the stack; on permanent failure give up,
 * otherwise quiesce (down + disable) and ask for a slot reset.
 * NOTE(review): extract is missing the e100_down call under
 * netif_running. */
3104 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3106 struct net_device *netdev = pci_get_drvdata(pdev);
3107 struct nic *nic = netdev_priv(netdev);
3109 netif_device_detach(netdev);
3111 if (state == pci_channel_io_perm_failure)
3112 return PCI_ERS_RESULT_DISCONNECT;
3114 if (netif_running(netdev))
3116 pci_disable_device(pdev);
3118 /* Request a slot reset. */
3119 return PCI_ERS_RESULT_NEED_RESET;
3123 * e100_io_slot_reset - called after the pci bus has been reset.
3124 * @pdev: Pointer to PCI device
3126 * Restart the card from scratch.
/* Re-enable the device after a bus reset; only function 0 of a
 * multi-function card performs the hardware reset itself.
 * NOTE(review): extract is missing the hw_reset call before the final
 * return. */
3128 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
3130 struct net_device *netdev = pci_get_drvdata(pdev);
3131 struct nic *nic = netdev_priv(netdev);
3133 if (pci_enable_device(pdev)) {
3134 pr_err("Cannot re-enable PCI device after reset\n");
3135 return PCI_ERS_RESULT_DISCONNECT;
3137 pci_set_master(pdev);
3139 /* Only one device per card can do a reset */
3140 if (0 != PCI_FUNC(pdev->devfn))
3141 return PCI_ERS_RESULT_RECOVERED;
3145 return PCI_ERS_RESULT_RECOVERED;
3149 * e100_io_resume - resume normal operations
3150 * @pdev: Pointer to PCI device
3152 * Resume normal operations after an error recovery
3153 * sequence has been completed.
/* Reattach the netdev and, if it was running, bring it back up and
 * restart the watchdog.
 * NOTE(review): extract is missing the e100_up call inside the
 * netif_running branch. */
3155 static void e100_io_resume(struct pci_dev *pdev)
3157 struct net_device *netdev = pci_get_drvdata(pdev);
3158 struct nic *nic = netdev_priv(netdev);
3160 /* ack any pending wake events, disable PME */
3161 pci_enable_wake(pdev, PCI_D0, 0);
3163 netif_device_attach(netdev);
3164 if (netif_running(netdev)) {
3166 mod_timer(&nic->watchdog, jiffies);
/* PCI AER callbacks wiring the recovery handlers above. */
3170 static const struct pci_error_handlers e100_err_handler = {
3171 .error_detected = e100_io_error_detected,
3172 .slot_reset = e100_io_slot_reset,
3173 .resume = e100_io_resume,
/* PCI driver descriptor: probe/remove, legacy PM hooks (under
 * CONFIG_PM in the full source), shutdown, and error recovery. */
3176 static struct pci_driver e100_driver = {
3178 .id_table = e100_id_table,
3179 .probe = e100_probe,
3180 .remove = e100_remove,
3182 /* Power Management hooks */
3183 .suspend = e100_suspend,
3184 .resume = e100_resume,
3186 .shutdown = e100_shutdown,
3187 .err_handler = &e100_err_handler,
/* Module entry/exit: optionally announce the driver (when the debug
 * mask includes NETIF_MSG_DRV), then (un)register the PCI driver. */
3190 static int __init e100_init_module(void)
3192 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3193 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3194 pr_info("%s\n", DRV_COPYRIGHT);
3196 return pci_register_driver(&e100_driver);
3199 static void __exit e100_cleanup_module(void)
3201 pci_unregister_driver(&e100_driver);
3204 module_init(e100_init_module);
3205 module_exit(e100_cleanup_module);