1 /*******************************************************************************
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
30 #include <net/ip6_checksum.h>
33 /* Intel Media SOC GbE MDIO physical base address */
34 static unsigned long ce4100_gbe_mdio_base_phy;
35 /* Intel Media SOC GbE MDIO virtual base address */
36 void __iomem *ce4100_gbe_mdio_base_virt;
38 char e1000_driver_name[] = "e1000";
39 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
40 #define DRV_VERSION "7.3.21-k8-NAPI"
41 const char e1000_driver_version[] = DRV_VERSION;
42 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
44 /* e1000_pci_tbl - PCI Device ID Table
46 * Last entry must be all 0s
49 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
51 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
52 INTEL_E1000_ETHERNET_DEVICE(0x1000),
53 INTEL_E1000_ETHERNET_DEVICE(0x1001),
54 INTEL_E1000_ETHERNET_DEVICE(0x1004),
55 INTEL_E1000_ETHERNET_DEVICE(0x1008),
56 INTEL_E1000_ETHERNET_DEVICE(0x1009),
57 INTEL_E1000_ETHERNET_DEVICE(0x100C),
58 INTEL_E1000_ETHERNET_DEVICE(0x100D),
59 INTEL_E1000_ETHERNET_DEVICE(0x100E),
60 INTEL_E1000_ETHERNET_DEVICE(0x100F),
61 INTEL_E1000_ETHERNET_DEVICE(0x1010),
62 INTEL_E1000_ETHERNET_DEVICE(0x1011),
63 INTEL_E1000_ETHERNET_DEVICE(0x1012),
64 INTEL_E1000_ETHERNET_DEVICE(0x1013),
65 INTEL_E1000_ETHERNET_DEVICE(0x1014),
66 INTEL_E1000_ETHERNET_DEVICE(0x1015),
67 INTEL_E1000_ETHERNET_DEVICE(0x1016),
68 INTEL_E1000_ETHERNET_DEVICE(0x1017),
69 INTEL_E1000_ETHERNET_DEVICE(0x1018),
70 INTEL_E1000_ETHERNET_DEVICE(0x1019),
71 INTEL_E1000_ETHERNET_DEVICE(0x101A),
72 INTEL_E1000_ETHERNET_DEVICE(0x101D),
73 INTEL_E1000_ETHERNET_DEVICE(0x101E),
74 INTEL_E1000_ETHERNET_DEVICE(0x1026),
75 INTEL_E1000_ETHERNET_DEVICE(0x1027),
76 INTEL_E1000_ETHERNET_DEVICE(0x1028),
77 INTEL_E1000_ETHERNET_DEVICE(0x1075),
78 INTEL_E1000_ETHERNET_DEVICE(0x1076),
79 INTEL_E1000_ETHERNET_DEVICE(0x1077),
80 INTEL_E1000_ETHERNET_DEVICE(0x1078),
81 INTEL_E1000_ETHERNET_DEVICE(0x1079),
82 INTEL_E1000_ETHERNET_DEVICE(0x107A),
83 INTEL_E1000_ETHERNET_DEVICE(0x107B),
84 INTEL_E1000_ETHERNET_DEVICE(0x107C),
85 INTEL_E1000_ETHERNET_DEVICE(0x108A),
86 INTEL_E1000_ETHERNET_DEVICE(0x1099),
87 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
88 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
89 /* required last entry */
93 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
95 int e1000_up(struct e1000_adapter *adapter);
96 void e1000_down(struct e1000_adapter *adapter);
97 void e1000_reinit_locked(struct e1000_adapter *adapter);
98 void e1000_reset(struct e1000_adapter *adapter);
99 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
100 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
101 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
102 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
103 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
104 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
105 struct e1000_tx_ring *txdr);
106 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
107 struct e1000_rx_ring *rxdr);
108 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
109 struct e1000_tx_ring *tx_ring);
110 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
111 struct e1000_rx_ring *rx_ring);
112 void e1000_update_stats(struct e1000_adapter *adapter);
114 static int e1000_init_module(void);
115 static void e1000_exit_module(void);
116 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
117 static void __devexit e1000_remove(struct pci_dev *pdev);
118 static int e1000_alloc_queues(struct e1000_adapter *adapter);
119 static int e1000_sw_init(struct e1000_adapter *adapter);
120 static int e1000_open(struct net_device *netdev);
121 static int e1000_close(struct net_device *netdev);
122 static void e1000_configure_tx(struct e1000_adapter *adapter);
123 static void e1000_configure_rx(struct e1000_adapter *adapter);
124 static void e1000_setup_rctl(struct e1000_adapter *adapter);
125 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
126 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
127 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
128 struct e1000_tx_ring *tx_ring);
129 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
130 struct e1000_rx_ring *rx_ring);
131 static void e1000_set_rx_mode(struct net_device *netdev);
132 static void e1000_update_phy_info(unsigned long data);
133 static void e1000_update_phy_info_task(struct work_struct *work);
134 static void e1000_watchdog(unsigned long data);
135 static void e1000_82547_tx_fifo_stall(unsigned long data);
136 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
137 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
138 struct net_device *netdev);
139 static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
140 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
141 static int e1000_set_mac(struct net_device *netdev, void *p);
142 static irqreturn_t e1000_intr(int irq, void *data);
143 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
144 struct e1000_tx_ring *tx_ring);
145 static int e1000_clean(struct napi_struct *napi, int budget);
146 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
147 struct e1000_rx_ring *rx_ring,
148 int *work_done, int work_to_do);
149 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
150 struct e1000_rx_ring *rx_ring,
151 int *work_done, int work_to_do);
152 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
153 struct e1000_rx_ring *rx_ring,
155 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
156 struct e1000_rx_ring *rx_ring,
158 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
159 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
161 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
162 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
163 static void e1000_tx_timeout(struct net_device *dev);
164 static void e1000_reset_task(struct work_struct *work);
165 static void e1000_smartspeed(struct e1000_adapter *adapter);
166 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
167 struct sk_buff *skb);
169 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
170 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
171 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
172 static void e1000_restore_vlan(struct e1000_adapter *adapter);
175 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
176 static int e1000_resume(struct pci_dev *pdev);
178 static void e1000_shutdown(struct pci_dev *pdev);
180 #ifdef CONFIG_NET_POLL_CONTROLLER
181 /* for netdump / net console */
182 static void e1000_netpoll (struct net_device *netdev);
185 #define COPYBREAK_DEFAULT 256
186 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
187 module_param(copybreak, uint, 0644);
188 MODULE_PARM_DESC(copybreak,
189 "Maximum size of packet that is copied to a new buffer on receive");
191 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
192 pci_channel_state_t state);
193 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
194 static void e1000_io_resume(struct pci_dev *pdev);
196 static struct pci_error_handlers e1000_err_handler = {
197 .error_detected = e1000_io_error_detected,
198 .slot_reset = e1000_io_slot_reset,
199 .resume = e1000_io_resume,
202 static struct pci_driver e1000_driver = {
203 .name = e1000_driver_name,
204 .id_table = e1000_pci_tbl,
205 .probe = e1000_probe,
206 .remove = __devexit_p(e1000_remove),
208 /* Power Management Hooks */
209 .suspend = e1000_suspend,
210 .resume = e1000_resume,
212 .shutdown = e1000_shutdown,
213 .err_handler = &e1000_err_handler
217 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
218 MODULE_LICENSE("GPL");
219 MODULE_VERSION(DRV_VERSION);
221 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
222 module_param(debug, int, 0);
223 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
226 * e1000_get_hw_dev - return device
227 * used by hardware layer to print debugging information
230 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
232 struct e1000_adapter *adapter = hw->back;
233 return adapter->netdev;
237 * e1000_init_module - Driver Registration Routine
239 * e1000_init_module is the first routine called when the driver is
240 * loaded. All it does is register with the PCI subsystem.
243 static int __init e1000_init_module(void)
246 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
248 pr_info("%s\n", e1000_copyright);
250 ret = pci_register_driver(&e1000_driver);
251 if (copybreak != COPYBREAK_DEFAULT) {
253 pr_info("copybreak disabled\n");
255 pr_info("copybreak enabled for "
256 "packets <= %u bytes\n", copybreak);
261 module_init(e1000_init_module);
264 * e1000_exit_module - Driver Exit Cleanup Routine
266 * e1000_exit_module is called just before the driver is removed
270 static void __exit e1000_exit_module(void)
272 pci_unregister_driver(&e1000_driver);
275 module_exit(e1000_exit_module);
277 static int e1000_request_irq(struct e1000_adapter *adapter)
279 struct net_device *netdev = adapter->netdev;
280 irq_handler_t handler = e1000_intr;
281 int irq_flags = IRQF_SHARED;
284 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
287 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
293 static void e1000_free_irq(struct e1000_adapter *adapter)
295 struct net_device *netdev = adapter->netdev;
297 free_irq(adapter->pdev->irq, netdev);
301 * e1000_irq_disable - Mask off interrupt generation on the NIC
302 * @adapter: board private structure
305 static void e1000_irq_disable(struct e1000_adapter *adapter)
307 struct e1000_hw *hw = &adapter->hw;
311 synchronize_irq(adapter->pdev->irq);
315 * e1000_irq_enable - Enable default interrupt generation settings
316 * @adapter: board private structure
319 static void e1000_irq_enable(struct e1000_adapter *adapter)
321 struct e1000_hw *hw = &adapter->hw;
323 ew32(IMS, IMS_ENABLE_MASK);
327 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
329 struct e1000_hw *hw = &adapter->hw;
330 struct net_device *netdev = adapter->netdev;
331 u16 vid = hw->mng_cookie.vlan_id;
332 u16 old_vid = adapter->mng_vlan_id;
333 if (adapter->vlgrp) {
334 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
335 if (hw->mng_cookie.status &
336 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
337 e1000_vlan_rx_add_vid(netdev, vid);
338 adapter->mng_vlan_id = vid;
340 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
342 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
344 !vlan_group_get_device(adapter->vlgrp, old_vid))
345 e1000_vlan_rx_kill_vid(netdev, old_vid);
347 adapter->mng_vlan_id = vid;
351 static void e1000_init_manageability(struct e1000_adapter *adapter)
353 struct e1000_hw *hw = &adapter->hw;
355 if (adapter->en_mng_pt) {
356 u32 manc = er32(MANC);
358 /* disable hardware interception of ARP */
359 manc &= ~(E1000_MANC_ARP_EN);
365 static void e1000_release_manageability(struct e1000_adapter *adapter)
367 struct e1000_hw *hw = &adapter->hw;
369 if (adapter->en_mng_pt) {
370 u32 manc = er32(MANC);
372 /* re-enable hardware interception of ARP */
373 manc |= E1000_MANC_ARP_EN;
380 * e1000_configure - configure the hardware for RX and TX
381 * @adapter: private board structure
383 static void e1000_configure(struct e1000_adapter *adapter)
385 struct net_device *netdev = adapter->netdev;
388 e1000_set_rx_mode(netdev);
390 e1000_restore_vlan(adapter);
391 e1000_init_manageability(adapter);
393 e1000_configure_tx(adapter);
394 e1000_setup_rctl(adapter);
395 e1000_configure_rx(adapter);
396 /* call E1000_DESC_UNUSED which always leaves
397 * at least 1 descriptor unused to make sure
398 * next_to_use != next_to_clean */
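/* As a sketch, E1000_DESC_UNUSED(ring) works out to roughly
 * ring->count - (ring->next_to_use - ring->next_to_clean) - 1, wrapping
 * modulo ring->count, i.e. how many descriptors may be filled right now
 * while always keeping one slot empty.
 */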
399 for (i = 0; i < adapter->num_rx_queues; i++) {
400 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
401 adapter->alloc_rx_buf(adapter, ring,
402 E1000_DESC_UNUSED(ring));
406 int e1000_up(struct e1000_adapter *adapter)
408 struct e1000_hw *hw = &adapter->hw;
410 /* hardware has been reset, we need to reload some things */
411 e1000_configure(adapter);
413 clear_bit(__E1000_DOWN, &adapter->flags);
415 napi_enable(&adapter->napi);
417 e1000_irq_enable(adapter);
419 netif_wake_queue(adapter->netdev);
421 /* fire a link change interrupt to start the watchdog */
422 ew32(ICS, E1000_ICS_LSC);
427 * e1000_power_up_phy - restore link in case the phy was powered down
428 * @adapter: address of board private structure
430 * The phy may be powered down to save power and turn off link when the
431 * driver is unloaded and wake on lan is not enabled (among others)
432 * *** this routine MUST be followed by a call to e1000_reset ***
436 void e1000_power_up_phy(struct e1000_adapter *adapter)
438 struct e1000_hw *hw = &adapter->hw;
441 /* Just clear the power down bit to wake the phy back up */
442 if (hw->media_type == e1000_media_type_copper) {
443 /* according to the manual, the phy will retain its
444 * settings across a power-down/up cycle */
445 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
446 mii_reg &= ~MII_CR_POWER_DOWN;
447 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
451 static void e1000_power_down_phy(struct e1000_adapter *adapter)
453 struct e1000_hw *hw = &adapter->hw;
455 /* Power down the PHY so no link is implied when interface is down *
456 * The PHY cannot be powered down if any of the following is true *
459 * (c) SoL/IDER session is active */
460 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
461 hw->media_type == e1000_media_type_copper) {
464 switch (hw->mac_type) {
467 case e1000_82545_rev_3:
470 case e1000_82546_rev_3:
472 case e1000_82541_rev_2:
474 case e1000_82547_rev_2:
475 if (er32(MANC) & E1000_MANC_SMBUS_EN)
481 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
482 mii_reg |= MII_CR_POWER_DOWN;
483 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
490 void e1000_down(struct e1000_adapter *adapter)
492 struct e1000_hw *hw = &adapter->hw;
493 struct net_device *netdev = adapter->netdev;
497 /* disable receives in the hardware */
499 ew32(RCTL, rctl & ~E1000_RCTL_EN);
500 /* flush and sleep below */
502 netif_tx_disable(netdev);
504 /* disable transmits in the hardware */
506 tctl &= ~E1000_TCTL_EN;
508 /* flush both disables and wait for them to finish */
512 napi_disable(&adapter->napi);
514 e1000_irq_disable(adapter);
517 * Setting DOWN must be after irq_disable to prevent
518 * a screaming interrupt. Setting DOWN also prevents
519 * timers and tasks from rescheduling.
521 set_bit(__E1000_DOWN, &adapter->flags);
523 del_timer_sync(&adapter->tx_fifo_stall_timer);
524 del_timer_sync(&adapter->watchdog_timer);
525 del_timer_sync(&adapter->phy_info_timer);
527 adapter->link_speed = 0;
528 adapter->link_duplex = 0;
529 netif_carrier_off(netdev);
531 e1000_reset(adapter);
532 e1000_clean_all_tx_rings(adapter);
533 e1000_clean_all_rx_rings(adapter);
536 static void e1000_reinit_safe(struct e1000_adapter *adapter)
538 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
544 clear_bit(__E1000_RESETTING, &adapter->flags);
547 void e1000_reinit_locked(struct e1000_adapter *adapter)
549 /* if rtnl_lock is not held the call path is bogus */
551 WARN_ON(in_interrupt());
552 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
556 clear_bit(__E1000_RESETTING, &adapter->flags);
559 void e1000_reset(struct e1000_adapter *adapter)
561 struct e1000_hw *hw = &adapter->hw;
562 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
563 bool legacy_pba_adjust = false;
566 /* Repartition PBA for greater than 9K MTU.
567 * To take effect, CTRL.RST is required.
570 switch (hw->mac_type) {
571 case e1000_82542_rev2_0:
572 case e1000_82542_rev2_1:
577 case e1000_82541_rev_2:
578 legacy_pba_adjust = true;
582 case e1000_82545_rev_3:
585 case e1000_82546_rev_3:
589 case e1000_82547_rev_2:
590 legacy_pba_adjust = true;
593 case e1000_undefined:
598 if (legacy_pba_adjust) {
599 if (hw->max_frame_size > E1000_RXBUFFER_8192)
600 pba -= 8; /* allocate more FIFO for Tx */
602 if (hw->mac_type == e1000_82547) {
603 adapter->tx_fifo_head = 0;
604 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
605 adapter->tx_fifo_size =
606 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
607 atomic_set(&adapter->tx_fifo_stall, 0);
609 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
610 /* adjust PBA for jumbo frames */
613 /* To maintain wire speed transmits, the Tx FIFO should be
614 * large enough to accommodate two full transmit packets,
615 * rounded up to the next 1KB and expressed in KB. Likewise,
616 * the Rx FIFO should be large enough to accommodate at least
617 * one full receive packet and is similarly rounded up and
618 * expressed in KB. */
620 /* upper 16 bits has Tx packet buffer allocation size in KB */
621 tx_space = pba >> 16;
622 /* lower 16 bits has Rx packet buffer allocation size in KB */
625 * the tx fifo also stores 16 bytes of information about the tx
626 * but don't include ethernet FCS because hardware appends it
628 min_tx_space = (hw->max_frame_size +
629 sizeof(struct e1000_tx_desc) -
631 min_tx_space = ALIGN(min_tx_space, 1024);
633 /* software strips receive CRC, so leave room for it */
634 min_rx_space = hw->max_frame_size;
635 min_rx_space = ALIGN(min_rx_space, 1024);
638 /* If current Tx allocation is less than the min Tx FIFO size,
639 * and the min Tx FIFO size is less than the current Rx FIFO
640 * allocation, take space away from current Rx allocation */
641 if (tx_space < min_tx_space &&
642 ((min_tx_space - tx_space) < pba)) {
643 pba = pba - (min_tx_space - tx_space);
645 /* PCI/PCIx hardware has PBA alignment constraints */
646 switch (hw->mac_type) {
647 case e1000_82545 ... e1000_82546_rev_3:
648 pba &= ~(E1000_PBA_8K - 1);
654 /* if short on rx space, rx wins and must trump tx
655 * adjustment or use Early Receive if available */
656 if (pba < min_rx_space)
664 * flow control settings:
665 * The high water mark must be low enough to fit one full frame
666 * (or the size used for early receive) above it in the Rx FIFO.
667 * Set it to the lower of:
668 * - 90% of the Rx FIFO size, and
669 * - the full Rx FIFO size minus the early receive size (for parts
670 * with ERT support assuming ERT set to E1000_ERT_2048), or
671 * - the full Rx FIFO size minus one full frame
673 hwm = min(((pba << 10) * 9 / 10),
674 ((pba << 10) - hw->max_frame_size));
676 hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
677 hw->fc_low_water = hw->fc_high_water - 8;
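/* Illustrative arithmetic with assumed values: a 40 KB Rx PBA and a
 * 1522-byte max frame give hwm = min(40960 * 9 / 10, 40960 - 1522)
 * = min(36864, 39438) = 36864, so fc_high_water = 36864 (already a
 * multiple of 8) and fc_low_water = 36856.
 */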
678 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
680 hw->fc = hw->original_fc;
682 /* Allow time for pending master requests to run */
684 if (hw->mac_type >= e1000_82544)
687 if (e1000_init_hw(hw))
688 e_dev_err("Hardware Error\n");
689 e1000_update_mng_vlan(adapter);
691 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
692 if (hw->mac_type >= e1000_82544 &&
694 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
695 u32 ctrl = er32(CTRL);
696 /* clear phy power management bit if we are in gig only mode,
697 * which if enabled will attempt negotiation to 100Mb, which
698 * can cause a loss of link at power off or driver unload */
699 ctrl &= ~E1000_CTRL_SWDPIN3;
703 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
704 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
706 e1000_reset_adaptive(hw);
707 e1000_phy_get_info(hw, &adapter->phy_info);
709 e1000_release_manageability(adapter);
713 * Dump the eeprom for users having checksum issues
715 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
717 struct net_device *netdev = adapter->netdev;
718 struct ethtool_eeprom eeprom;
719 const struct ethtool_ops *ops = netdev->ethtool_ops;
722 u16 csum_old, csum_new = 0;
724 eeprom.len = ops->get_eeprom_len(netdev);
727 data = kmalloc(eeprom.len, GFP_KERNEL);
729 pr_err("Unable to allocate memory to dump EEPROM data\n");
733 ops->get_eeprom(netdev, &eeprom, data);
735 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
736 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
737 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
738 csum_new += data[i] + (data[i + 1] << 8);
739 csum_new = EEPROM_SUM - csum_new;
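/* Checksum convention: the 16-bit EEPROM words from offset 0 through the
 * checksum word at EEPROM_CHECKSUM_REG should sum to EEPROM_SUM (0xBABA),
 * so the expected value recomputed here is EEPROM_SUM minus the 16-bit sum
 * of all preceding words.
 */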
741 pr_err("/*********************/\n");
742 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
743 pr_err("Calculated : 0x%04x\n", csum_new);
745 pr_err("Offset Values\n");
746 pr_err("======== ======\n");
747 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
749 pr_err("Include this output when contacting your support provider.\n");
750 pr_err("This is not a software error! Something bad happened to\n");
751 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
752 pr_err("result in further problems, possibly loss of data,\n");
753 pr_err("corruption or system hangs!\n");
754 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
755 pr_err("which is invalid and requires you to set the proper MAC\n");
756 pr_err("address manually before continuing to enable this network\n");
757 pr_err("device. Please inspect the EEPROM dump and report the\n");
758 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
759 pr_err("/*********************/\n");
765 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
766 * @pdev: PCI device information struct
768 * Return true if an adapter needs ioport resources
770 static int e1000_is_need_ioport(struct pci_dev *pdev)
772 switch (pdev->device) {
773 case E1000_DEV_ID_82540EM:
774 case E1000_DEV_ID_82540EM_LOM:
775 case E1000_DEV_ID_82540EP:
776 case E1000_DEV_ID_82540EP_LOM:
777 case E1000_DEV_ID_82540EP_LP:
778 case E1000_DEV_ID_82541EI:
779 case E1000_DEV_ID_82541EI_MOBILE:
780 case E1000_DEV_ID_82541ER:
781 case E1000_DEV_ID_82541ER_LOM:
782 case E1000_DEV_ID_82541GI:
783 case E1000_DEV_ID_82541GI_LF:
784 case E1000_DEV_ID_82541GI_MOBILE:
785 case E1000_DEV_ID_82544EI_COPPER:
786 case E1000_DEV_ID_82544EI_FIBER:
787 case E1000_DEV_ID_82544GC_COPPER:
788 case E1000_DEV_ID_82544GC_LOM:
789 case E1000_DEV_ID_82545EM_COPPER:
790 case E1000_DEV_ID_82545EM_FIBER:
791 case E1000_DEV_ID_82546EB_COPPER:
792 case E1000_DEV_ID_82546EB_FIBER:
793 case E1000_DEV_ID_82546EB_QUAD_COPPER:
800 static const struct net_device_ops e1000_netdev_ops = {
801 .ndo_open = e1000_open,
802 .ndo_stop = e1000_close,
803 .ndo_start_xmit = e1000_xmit_frame,
804 .ndo_get_stats = e1000_get_stats,
805 .ndo_set_rx_mode = e1000_set_rx_mode,
806 .ndo_set_mac_address = e1000_set_mac,
807 .ndo_tx_timeout = e1000_tx_timeout,
808 .ndo_change_mtu = e1000_change_mtu,
809 .ndo_do_ioctl = e1000_ioctl,
810 .ndo_validate_addr = eth_validate_addr,
812 .ndo_vlan_rx_register = e1000_vlan_rx_register,
813 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
814 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
815 #ifdef CONFIG_NET_POLL_CONTROLLER
816 .ndo_poll_controller = e1000_netpoll,
821 * e1000_init_hw_struct - initialize members of hw struct
822 * @adapter: board private struct
823 * @hw: structure used by e1000_hw.c
825 * Factors out initialization of the e1000_hw struct to its own function
826 * that can be called very early at init (just after struct allocation).
827 * Fields are initialized based on PCI device information and
828 * OS network device settings (MTU size).
829 * Returns negative error codes if MAC type setup fails.
831 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
834 struct pci_dev *pdev = adapter->pdev;
836 /* PCI config space info */
837 hw->vendor_id = pdev->vendor;
838 hw->device_id = pdev->device;
839 hw->subsystem_vendor_id = pdev->subsystem_vendor;
840 hw->subsystem_id = pdev->subsystem_device;
841 hw->revision_id = pdev->revision;
843 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
845 hw->max_frame_size = adapter->netdev->mtu +
846 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
847 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
849 /* identify the MAC */
850 if (e1000_set_mac_type(hw)) {
851 e_err(probe, "Unknown MAC Type\n");
855 switch (hw->mac_type) {
860 case e1000_82541_rev_2:
861 case e1000_82547_rev_2:
862 hw->phy_init_script = 1;
866 e1000_set_media_type(hw);
867 e1000_get_bus_info(hw);
869 hw->wait_autoneg_complete = false;
870 hw->tbi_compatibility_en = true;
871 hw->adaptive_ifs = true;
875 if (hw->media_type == e1000_media_type_copper) {
876 hw->mdix = AUTO_ALL_MODES;
877 hw->disable_polarity_correction = false;
878 hw->master_slave = E1000_MASTER_SLAVE;
885 * e1000_probe - Device Initialization Routine
886 * @pdev: PCI device information struct
887 * @ent: entry in e1000_pci_tbl
889 * Returns 0 on success, negative on failure
891 * e1000_probe initializes an adapter identified by a pci_dev structure.
892 * The OS initialization, configuring of the adapter private structure,
893 * and a hardware reset occur.
895 static int __devinit e1000_probe(struct pci_dev *pdev,
896 const struct pci_device_id *ent)
898 struct net_device *netdev;
899 struct e1000_adapter *adapter;
902 static int cards_found = 0;
903 static int global_quad_port_a = 0; /* global ksp3 port a indication */
904 int i, err, pci_using_dac;
907 u16 eeprom_apme_mask = E1000_EEPROM_APME;
908 int bars, need_ioport;
910 /* do not allocate ioport bars when not needed */
911 need_ioport = e1000_is_need_ioport(pdev);
913 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
914 err = pci_enable_device(pdev);
916 bars = pci_select_bars(pdev, IORESOURCE_MEM);
917 err = pci_enable_device_mem(pdev);
922 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
926 pci_set_master(pdev);
927 err = pci_save_state(pdev);
929 goto err_alloc_etherdev;
932 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
934 goto err_alloc_etherdev;
936 SET_NETDEV_DEV(netdev, &pdev->dev);
938 pci_set_drvdata(pdev, netdev);
939 adapter = netdev_priv(netdev);
940 adapter->netdev = netdev;
941 adapter->pdev = pdev;
942 adapter->msg_enable = (1 << debug) - 1;
943 adapter->bars = bars;
944 adapter->need_ioport = need_ioport;
950 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
954 if (adapter->need_ioport) {
955 for (i = BAR_1; i <= BAR_5; i++) {
956 if (pci_resource_len(pdev, i) == 0)
958 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
959 hw->io_base = pci_resource_start(pdev, i);
965 /* make ready for any if (hw->...) below */
966 err = e1000_init_hw_struct(adapter, hw);
971 * there is a workaround being applied below that limits
972 * 64-bit DMA addresses to 64-bit hardware. There are some
973 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
976 if ((hw->bus_type == e1000_bus_type_pcix) &&
977 !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
979 * according to DMA-API-HOWTO, coherent calls will always
980 * succeed if the set call did
982 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
985 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
987 pr_err("No usable DMA config, aborting\n");
990 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
993 netdev->netdev_ops = &e1000_netdev_ops;
994 e1000_set_ethtool_ops(netdev);
995 netdev->watchdog_timeo = 5 * HZ;
996 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
998 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1000 adapter->bd_number = cards_found;
1002 /* setup the private structure */
1004 err = e1000_sw_init(adapter);
1009 if (hw->mac_type == e1000_ce4100) {
1010 ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
1011 ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
1012 pci_resource_len(pdev, BAR_1));
1014 if (!ce4100_gbe_mdio_base_virt)
1015 goto err_mdio_ioremap;
1018 if (hw->mac_type >= e1000_82543) {
1019 netdev->features = NETIF_F_SG |
1021 NETIF_F_HW_VLAN_TX |
1022 NETIF_F_HW_VLAN_RX |
1023 NETIF_F_HW_VLAN_FILTER;
1026 if ((hw->mac_type >= e1000_82544) &&
1027 (hw->mac_type != e1000_82547))
1028 netdev->features |= NETIF_F_TSO;
1030 if (pci_using_dac) {
1031 netdev->features |= NETIF_F_HIGHDMA;
1032 netdev->vlan_features |= NETIF_F_HIGHDMA;
1035 netdev->vlan_features |= NETIF_F_TSO;
1036 netdev->vlan_features |= NETIF_F_HW_CSUM;
1037 netdev->vlan_features |= NETIF_F_SG;
1039 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1041 /* initialize eeprom parameters */
1042 if (e1000_init_eeprom_params(hw)) {
1043 e_err(probe, "EEPROM initialization failed\n");
1047 /* before reading the EEPROM, reset the controller to
1048 * put the device in a known good starting state */
1052 /* make sure the EEPROM is good */
1053 if (e1000_validate_eeprom_checksum(hw) < 0) {
1054 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1055 e1000_dump_eeprom(adapter);
1057 * set MAC address to all zeroes to invalidate and temporarily
1058 * disable this device for the user. This blocks regular
1059 * traffic while still permitting ethtool ioctls from reaching
1060 * the hardware as well as allowing the user to run the
1061 * interface after manually setting a hw addr using
1064 memset(hw->mac_addr, 0, netdev->addr_len);
1066 /* copy the MAC address out of the EEPROM */
1067 if (e1000_read_mac_addr(hw))
1068 e_err(probe, "EEPROM Read Error\n");
1070 /* don't block initialization here due to bad MAC address */
1071 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1072 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1074 if (!is_valid_ether_addr(netdev->perm_addr))
1075 e_err(probe, "Invalid MAC Address\n");
1077 init_timer(&adapter->tx_fifo_stall_timer);
1078 adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
1079 adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
1081 init_timer(&adapter->watchdog_timer);
1082 adapter->watchdog_timer.function = e1000_watchdog;
1083 adapter->watchdog_timer.data = (unsigned long) adapter;
1085 init_timer(&adapter->phy_info_timer);
1086 adapter->phy_info_timer.function = e1000_update_phy_info;
1087 adapter->phy_info_timer.data = (unsigned long)adapter;
1089 INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
1090 INIT_WORK(&adapter->reset_task, e1000_reset_task);
1091 INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1093 e1000_check_options(adapter);
1095 /* Initial Wake on LAN setting
1096 * If APM wake is enabled in the EEPROM,
1097 * enable the ACPI Magic Packet filter
1100 switch (hw->mac_type) {
1101 case e1000_82542_rev2_0:
1102 case e1000_82542_rev2_1:
1106 e1000_read_eeprom(hw,
1107 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1108 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1111 case e1000_82546_rev_3:
1112 if (er32(STATUS) & E1000_STATUS_FUNC_1){
1113 e1000_read_eeprom(hw,
1114 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1119 e1000_read_eeprom(hw,
1120 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1123 if (eeprom_data & eeprom_apme_mask)
1124 adapter->eeprom_wol |= E1000_WUFC_MAG;
1126 /* now that we have the eeprom settings, apply the special cases
1127 * where the eeprom may be wrong or the board simply won't support
1128 * wake on lan on a particular port */
1129 switch (pdev->device) {
1130 case E1000_DEV_ID_82546GB_PCIE:
1131 adapter->eeprom_wol = 0;
1133 case E1000_DEV_ID_82546EB_FIBER:
1134 case E1000_DEV_ID_82546GB_FIBER:
1135 /* Wake events only supported on port A for dual fiber
1136 * regardless of eeprom setting */
1137 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1138 adapter->eeprom_wol = 0;
1140 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1141 /* if quad port adapter, disable WoL on all but port A */
1142 if (global_quad_port_a != 0)
1143 adapter->eeprom_wol = 0;
1145 adapter->quad_port_a = 1;
1146 /* Reset for multiple quad port adapters */
1147 if (++global_quad_port_a == 4)
1148 global_quad_port_a = 0;
1152 /* initialize the wol settings based on the eeprom settings */
1153 adapter->wol = adapter->eeprom_wol;
1154 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1156 /* Auto detect PHY address */
1157 if (hw->mac_type == e1000_ce4100) {
1158 for (i = 0; i < 32; i++) {
1160 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1161 if (tmp == 0 || tmp == 0xFF) {
1170 /* reset the hardware with the new settings */
1171 e1000_reset(adapter);
1173 strcpy(netdev->name, "eth%d");
1174 err = register_netdev(netdev);
1178 /* print bus type/speed/width info */
1179 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1180 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1181 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1182 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1183 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1184 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1185 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1188 /* carrier off reporting is important to ethtool even BEFORE open */
1189 netif_carrier_off(netdev);
1191 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1198 e1000_phy_hw_reset(hw);
1200 if (hw->flash_address)
1201 iounmap(hw->flash_address);
1202 kfree(adapter->tx_ring);
1203 kfree(adapter->rx_ring);
1207 iounmap(ce4100_gbe_mdio_base_virt);
1208 iounmap(hw->hw_addr);
1210 free_netdev(netdev);
1212 pci_release_selected_regions(pdev, bars);
1214 pci_disable_device(pdev);
1219 * e1000_remove - Device Removal Routine
1220 * @pdev: PCI device information struct
1222 * e1000_remove is called by the PCI subsystem to alert the driver
1223 * that it should release a PCI device. That could be caused by a
1224 * Hot-Plug event, or because the driver is going to be removed from
1228 static void __devexit e1000_remove(struct pci_dev *pdev)
1230 struct net_device *netdev = pci_get_drvdata(pdev);
1231 struct e1000_adapter *adapter = netdev_priv(netdev);
1232 struct e1000_hw *hw = &adapter->hw;
1234 set_bit(__E1000_DOWN, &adapter->flags);
1235 del_timer_sync(&adapter->tx_fifo_stall_timer);
1236 del_timer_sync(&adapter->watchdog_timer);
1237 del_timer_sync(&adapter->phy_info_timer);
1239 cancel_work_sync(&adapter->reset_task);
1241 e1000_release_manageability(adapter);
1243 unregister_netdev(netdev);
1245 e1000_phy_hw_reset(hw);
1247 kfree(adapter->tx_ring);
1248 kfree(adapter->rx_ring);
1250 iounmap(hw->hw_addr);
1251 if (hw->flash_address)
1252 iounmap(hw->flash_address);
1253 pci_release_selected_regions(pdev, adapter->bars);
1255 free_netdev(netdev);
1257 pci_disable_device(pdev);
1261 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1262 * @adapter: board private structure to initialize
1264 * e1000_sw_init initializes the Adapter private data structure.
1265 * e1000_init_hw_struct MUST be called before this function
1268 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1270 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1272 adapter->num_tx_queues = 1;
1273 adapter->num_rx_queues = 1;
1275 if (e1000_alloc_queues(adapter)) {
1276 e_err(probe, "Unable to allocate memory for queues\n");
1280 /* Explicitly disable IRQ since the NIC can be in any state. */
1281 e1000_irq_disable(adapter);
1283 spin_lock_init(&adapter->stats_lock);
1285 set_bit(__E1000_DOWN, &adapter->flags);
1291 * e1000_alloc_queues - Allocate memory for all rings
1292 * @adapter: board private structure to initialize
1294 * We allocate one ring per queue at run-time since we don't know the
1295 * number of queues at compile-time.
1298 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1300 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1301 sizeof(struct e1000_tx_ring), GFP_KERNEL);
1302 if (!adapter->tx_ring)
1305 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1306 sizeof(struct e1000_rx_ring), GFP_KERNEL);
1307 if (!adapter->rx_ring) {
1308 kfree(adapter->tx_ring);
1312 return E1000_SUCCESS;
1316 * e1000_open - Called when a network interface is made active
1317 * @netdev: network interface device structure
1319 * Returns 0 on success, negative value on failure
1321 * The open entry point is called when a network interface is made
1322 * active by the system (IFF_UP). At this point all resources needed
1323 * for transmit and receive operations are allocated, the interrupt
1324 * handler is registered with the OS, the watchdog timer is started,
1325 * and the stack is notified that the interface is ready.
1328 static int e1000_open(struct net_device *netdev)
1330 struct e1000_adapter *adapter = netdev_priv(netdev);
1331 struct e1000_hw *hw = &adapter->hw;
1334 /* disallow open during test */
1335 if (test_bit(__E1000_TESTING, &adapter->flags))
1338 netif_carrier_off(netdev);
1340 /* allocate transmit descriptors */
1341 err = e1000_setup_all_tx_resources(adapter);
1345 /* allocate receive descriptors */
1346 err = e1000_setup_all_rx_resources(adapter);
1350 e1000_power_up_phy(adapter);
1352 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1353 if ((hw->mng_cookie.status &
1354 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1355 e1000_update_mng_vlan(adapter);
1358 /* before we allocate an interrupt, we must be ready to handle it.
1359 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1360 * as soon as we call pci_request_irq, so we have to setup our
1361 * clean_rx handler before we do so. */
1362 e1000_configure(adapter);
1364 err = e1000_request_irq(adapter);
1368 /* From here on the code is the same as e1000_up() */
1369 clear_bit(__E1000_DOWN, &adapter->flags);
1371 napi_enable(&adapter->napi);
1373 e1000_irq_enable(adapter);
1375 netif_start_queue(netdev);
1377 /* fire a link status change interrupt to start the watchdog */
1378 ew32(ICS, E1000_ICS_LSC);
1380 return E1000_SUCCESS;
1383 e1000_power_down_phy(adapter);
1384 e1000_free_all_rx_resources(adapter);
1386 e1000_free_all_tx_resources(adapter);
1388 e1000_reset(adapter);
1394 * e1000_close - Disables a network interface
1395 * @netdev: network interface device structure
1397 * Returns 0, this is not allowed to fail
1399 * The close entry point is called when an interface is de-activated
1400 * by the OS. The hardware is still under the driver's control, but
1401 * needs to be disabled. A global MAC reset is issued to stop the
1402 * hardware, and all transmit and receive resources are freed.
1405 static int e1000_close(struct net_device *netdev)
1407 struct e1000_adapter *adapter = netdev_priv(netdev);
1408 struct e1000_hw *hw = &adapter->hw;
1410 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1411 e1000_down(adapter);
1412 e1000_power_down_phy(adapter);
1413 e1000_free_irq(adapter);
1415 e1000_free_all_tx_resources(adapter);
1416 e1000_free_all_rx_resources(adapter);
1418 /* kill manageability vlan ID if supported, but not if a vlan with
1419 * the same ID is registered on the host OS (let 8021q kill it) */
1420 if ((hw->mng_cookie.status &
1421 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1423 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
1424 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1431 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1432 * @adapter: address of board private structure
1433 * @start: address of beginning of memory
1434 * @len: length of memory
1436 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1439 struct e1000_hw *hw = &adapter->hw;
1440 unsigned long begin = (unsigned long)start;
1441 unsigned long end = begin + len;
1443 /* First rev 82545 and 82546 need to not allow any memory
1444 * write location to cross 64k boundary due to errata 23 */
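/* Two addresses fall within the same 64 KB page exactly when their bits
 * above bit 15 match, i.e. when (begin ^ (end - 1)) >> 16 == 0. Example
 * with assumed values: begin = 0x1F000, len = 0x2000 gives end - 1 =
 * 0x20FFF, the XOR is 0x3FFFF and shifting right by 16 leaves 3, so that
 * region crosses a boundary and the check below returns false.
 */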
1445 if (hw->mac_type == e1000_82545 ||
1446 hw->mac_type == e1000_ce4100 ||
1447 hw->mac_type == e1000_82546) {
1448 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1455 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1456 * @adapter: board private structure
1457 * @txdr: tx descriptor ring (for a specific queue) to setup
1459 * Return 0 on success, negative on failure
1462 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1463 struct e1000_tx_ring *txdr)
1465 struct pci_dev *pdev = adapter->pdev;
1468 size = sizeof(struct e1000_buffer) * txdr->count;
1469 txdr->buffer_info = vzalloc(size);
1470 if (!txdr->buffer_info) {
1471 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1476 /* round up to nearest 4K */
1478 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1479 txdr->size = ALIGN(txdr->size, 4096);
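/* For example, the default of 256 descriptors works out to
 * 256 * sizeof(struct e1000_tx_desc) = 256 * 16 = 4096 bytes, which is
 * already a whole 4K page, so ALIGN() leaves it unchanged.
 */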
1481 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1485 vfree(txdr->buffer_info);
1486 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1491 /* Fix for errata 23, can't cross 64kB boundary */
1492 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1493 void *olddesc = txdr->desc;
1494 dma_addr_t olddma = txdr->dma;
1495 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1496 txdr->size, txdr->desc);
1497 /* Try again, without freeing the previous */
1498 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1499 &txdr->dma, GFP_KERNEL);
1500 /* Failed allocation, critical failure */
1502 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1504 goto setup_tx_desc_die;
1507 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1509 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1511 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1513 e_err(probe, "Unable to allocate aligned memory "
1514 "for the transmit descriptor ring\n");
1515 vfree(txdr->buffer_info);
1518 /* Free old allocation, new allocation was successful */
1519 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1523 memset(txdr->desc, 0, txdr->size);
1525 txdr->next_to_use = 0;
1526 txdr->next_to_clean = 0;
1532 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1533 * (Descriptors) for all queues
1534 * @adapter: board private structure
1536 * Return 0 on success, negative on failure
1539 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1543 for (i = 0; i < adapter->num_tx_queues; i++) {
1544 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1546 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1547 for (i-- ; i >= 0; i--)
1548 e1000_free_tx_resources(adapter,
1549 &adapter->tx_ring[i]);
1558 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1559 * @adapter: board private structure
1561 * Configure the Tx unit of the MAC after a reset.
1564 static void e1000_configure_tx(struct e1000_adapter *adapter)
1567 struct e1000_hw *hw = &adapter->hw;
1568 u32 tdlen, tctl, tipg;
1571 /* Setup the HW Tx Head and Tail descriptor pointers */
1573 switch (adapter->num_tx_queues) {
1576 tdba = adapter->tx_ring[0].dma;
1577 tdlen = adapter->tx_ring[0].count *
1578 sizeof(struct e1000_tx_desc);
1580 ew32(TDBAH, (tdba >> 32));
1581 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1584 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1585 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1589 /* Set the default values for the Tx Inter Packet Gap timer */
1590 if ((hw->media_type == e1000_media_type_fiber ||
1591 hw->media_type == e1000_media_type_internal_serdes))
1592 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1594 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1596 switch (hw->mac_type) {
1597 case e1000_82542_rev2_0:
1598 case e1000_82542_rev2_1:
1599 tipg = DEFAULT_82542_TIPG_IPGT;
1600 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1601 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1604 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1605 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1608 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1609 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1612 /* Set the Tx Interrupt Delay register */
1614 ew32(TIDV, adapter->tx_int_delay);
1615 if (hw->mac_type >= e1000_82540)
1616 ew32(TADV, adapter->tx_abs_int_delay);
1618 /* Program the Transmit Control Register */
1621 tctl &= ~E1000_TCTL_CT;
1622 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1623 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1625 e1000_config_collision_dist(hw);
1627 /* Setup Transmit Descriptor Settings for eop descriptor */
1628 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1630 /* only set IDE if we are delaying interrupts using the timers */
1631 if (adapter->tx_int_delay)
1632 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1634 if (hw->mac_type < e1000_82543)
1635 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1637 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1639 /* Cache if we're 82544 running in PCI-X because we'll
1640 * need this to apply a workaround later in the send path. */
1641 if (hw->mac_type == e1000_82544 &&
1642 hw->bus_type == e1000_bus_type_pcix)
1643 adapter->pcix_82544 = 1;
1650 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1651 * @adapter: board private structure
1652 * @rxdr: rx descriptor ring (for a specific queue) to setup
1654 * Returns 0 on success, negative on failure
1657 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1658 struct e1000_rx_ring *rxdr)
1660 struct pci_dev *pdev = adapter->pdev;
1663 size = sizeof(struct e1000_buffer) * rxdr->count;
1664 rxdr->buffer_info = vzalloc(size);
1665 if (!rxdr->buffer_info) {
1666 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1671 desc_len = sizeof(struct e1000_rx_desc);
1673 /* Round up to nearest 4K */
1675 rxdr->size = rxdr->count * desc_len;
1676 rxdr->size = ALIGN(rxdr->size, 4096);
1678 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1682 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1685 vfree(rxdr->buffer_info);
1689 /* Fix for errata 23, can't cross 64kB boundary */
1690 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1691 void *olddesc = rxdr->desc;
1692 dma_addr_t olddma = rxdr->dma;
1693 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1694 rxdr->size, rxdr->desc);
1695 /* Try again, without freeing the previous */
1696 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1697 &rxdr->dma, GFP_KERNEL);
1698 /* Failed allocation, critical failure */
1700 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1702 e_err(probe, "Unable to allocate memory for the Rx "
1703 "descriptor ring\n");
1704 goto setup_rx_desc_die;
1707 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1709 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1711 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1713 e_err(probe, "Unable to allocate aligned memory for "
1714 "the Rx descriptor ring\n");
1715 goto setup_rx_desc_die;
1717 /* Free old allocation, new allocation was successful */
1718 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1722 memset(rxdr->desc, 0, rxdr->size);
1724 rxdr->next_to_clean = 0;
1725 rxdr->next_to_use = 0;
1726 rxdr->rx_skb_top = NULL;
1732 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1733 * (Descriptors) for all queues
1734 * @adapter: board private structure
1736 * Return 0 on success, negative on failure
1739 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1743 for (i = 0; i < adapter->num_rx_queues; i++) {
1744 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1746 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1747 for (i-- ; i >= 0; i--)
1748 e1000_free_rx_resources(adapter,
1749 &adapter->rx_ring[i]);
1758 * e1000_setup_rctl - configure the receive control registers
1759 * @adapter: Board private structure
1761 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1763 struct e1000_hw *hw = &adapter->hw;
1768 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1770 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1771 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1772 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
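/* Rough background: SBP (store bad packets) is only wanted while the TBI
 * compatibility workaround is active, because the fiber/SerDes hardware can
 * flag the final byte of an otherwise valid frame as an error and the
 * driver then has to accept the "bad" frame and repair it in software.
 */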
1774 if (hw->tbi_compatibility_on == 1)
1775 rctl |= E1000_RCTL_SBP;
1777 rctl &= ~E1000_RCTL_SBP;
1779 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1780 rctl &= ~E1000_RCTL_LPE;
1782 rctl |= E1000_RCTL_LPE;
1784 /* Setup buffer sizes */
1785 rctl &= ~E1000_RCTL_SZ_4096;
1786 rctl |= E1000_RCTL_BSEX;
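/* The SZ_4096/8192/16384 encodings used below assume BSEX is set, which
 * multiplies the base buffer-size encoding by 16; the 2048 case clears
 * BSEX again so the standard encoding applies.
 */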
1787 switch (adapter->rx_buffer_len) {
1788 case E1000_RXBUFFER_2048:
1790 rctl |= E1000_RCTL_SZ_2048;
1791 rctl &= ~E1000_RCTL_BSEX;
1793 case E1000_RXBUFFER_4096:
1794 rctl |= E1000_RCTL_SZ_4096;
1796 case E1000_RXBUFFER_8192:
1797 rctl |= E1000_RCTL_SZ_8192;
1799 case E1000_RXBUFFER_16384:
1800 rctl |= E1000_RCTL_SZ_16384;
1808 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1809 * @adapter: board private structure
1811 * Configure the Rx unit of the MAC after a reset.
1814 static void e1000_configure_rx(struct e1000_adapter *adapter)
1817 struct e1000_hw *hw = &adapter->hw;
1818 u32 rdlen, rctl, rxcsum;
1820 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1821 rdlen = adapter->rx_ring[0].count *
1822 sizeof(struct e1000_rx_desc);
1823 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1824 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1826 rdlen = adapter->rx_ring[0].count *
1827 sizeof(struct e1000_rx_desc);
1828 adapter->clean_rx = e1000_clean_rx_irq;
1829 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
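/* Note: the jumbo path above hands whole pages to the hardware and
 * reassembles chained buffers in e1000_clean_jumbo_rx_irq (via rx_skb_top),
 * while the standard path uses one skb-backed buffer per frame.
 */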
1832 /* disable receives while setting up the descriptors */
1834 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1836 /* set the Receive Delay Timer Register */
1837 ew32(RDTR, adapter->rx_int_delay);
1839 if (hw->mac_type >= e1000_82540) {
1840 ew32(RADV, adapter->rx_abs_int_delay);
1841 if (adapter->itr_setting != 0)
1842 ew32(ITR, 1000000000 / (adapter->itr * 256));
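/* The ITR interval is programmed in 256 ns units, so writing
 * 1000000000 / (itr * 256) gives roughly 'itr' interrupts per second; with
 * an assumed itr of 8000 the register value is 488, i.e. a minimum
 * inter-interrupt gap of about 125 us.
 */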
1845 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1846 * the Base and Length of the Rx Descriptor Ring */
1847 switch (adapter->num_rx_queues) {
1850 rdba = adapter->rx_ring[0].dma;
1852 ew32(RDBAH, (rdba >> 32));
1853 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1856 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
1857 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
1861 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1862 if (hw->mac_type >= e1000_82543) {
1863 rxcsum = er32(RXCSUM);
1864 if (adapter->rx_csum)
1865 rxcsum |= E1000_RXCSUM_TUOFL;
1867 /* don't need to clear IPPCSE as it defaults to 0 */
1868 rxcsum &= ~E1000_RXCSUM_TUOFL;
1869 ew32(RXCSUM, rxcsum);
1872 /* Enable Receives */
1877 * e1000_free_tx_resources - Free Tx Resources per Queue
1878 * @adapter: board private structure
1879 * @tx_ring: Tx descriptor ring for a specific queue
1881 * Free all transmit software resources
1884 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1885 struct e1000_tx_ring *tx_ring)
1887 struct pci_dev *pdev = adapter->pdev;
1889 e1000_clean_tx_ring(adapter, tx_ring);
1891 vfree(tx_ring->buffer_info);
1892 tx_ring->buffer_info = NULL;
1894 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1897 tx_ring->desc = NULL;
1901 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1902 * @adapter: board private structure
1904 * Free all transmit software resources
1907 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1911 for (i = 0; i < adapter->num_tx_queues; i++)
1912 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1915 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1916 struct e1000_buffer *buffer_info)
1918 if (buffer_info->dma) {
1919 if (buffer_info->mapped_as_page)
1920 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1921 buffer_info->length, DMA_TO_DEVICE);
1923 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1924 buffer_info->length,
1926 buffer_info->dma = 0;
1928 if (buffer_info->skb) {
1929 dev_kfree_skb_any(buffer_info->skb);
1930 buffer_info->skb = NULL;
1932 buffer_info->time_stamp = 0;
1933 /* buffer_info must be completely set up in the transmit path */
1937 * e1000_clean_tx_ring - Free Tx Buffers
1938 * @adapter: board private structure
1939 * @tx_ring: ring to be cleaned
1942 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1943 struct e1000_tx_ring *tx_ring)
1945 struct e1000_hw *hw = &adapter->hw;
1946 struct e1000_buffer *buffer_info;
1950 /* Free all the Tx ring sk_buffs */
1952 for (i = 0; i < tx_ring->count; i++) {
1953 buffer_info = &tx_ring->buffer_info[i];
1954 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1957 size = sizeof(struct e1000_buffer) * tx_ring->count;
1958 memset(tx_ring->buffer_info, 0, size);
1960 /* Zero out the descriptor ring */
1962 memset(tx_ring->desc, 0, tx_ring->size);
1964 tx_ring->next_to_use = 0;
1965 tx_ring->next_to_clean = 0;
1966 tx_ring->last_tx_tso = 0;
1968 writel(0, hw->hw_addr + tx_ring->tdh);
1969 writel(0, hw->hw_addr + tx_ring->tdt);
1973 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
1974 * @adapter: board private structure
1977 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
1981 for (i = 0; i < adapter->num_tx_queues; i++)
1982 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1986 * e1000_free_rx_resources - Free Rx Resources
1987 * @adapter: board private structure
1988 * @rx_ring: ring to clean the resources from
1990 * Free all receive software resources
1993 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
1994 struct e1000_rx_ring *rx_ring)
1996 struct pci_dev *pdev = adapter->pdev;
1998 e1000_clean_rx_ring(adapter, rx_ring);
2000 vfree(rx_ring->buffer_info);
2001 rx_ring->buffer_info = NULL;
2003 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2006 rx_ring->desc = NULL;
2010 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2011 * @adapter: board private structure
2013 * Free all receive software resources
2016 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2020 for (i = 0; i < adapter->num_rx_queues; i++)
2021 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2025 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2026 * @adapter: board private structure
2027 * @rx_ring: ring to free buffers from
2030 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2031 struct e1000_rx_ring *rx_ring)
2033 struct e1000_hw *hw = &adapter->hw;
2034 struct e1000_buffer *buffer_info;
2035 struct pci_dev *pdev = adapter->pdev;
2039 /* Free all the Rx ring sk_buffs */
2040 for (i = 0; i < rx_ring->count; i++) {
2041 buffer_info = &rx_ring->buffer_info[i];
2042 if (buffer_info->dma &&
2043 adapter->clean_rx == e1000_clean_rx_irq) {
2044 dma_unmap_single(&pdev->dev, buffer_info->dma,
2045 buffer_info->length,
2047 } else if (buffer_info->dma &&
2048 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2049 dma_unmap_page(&pdev->dev, buffer_info->dma,
2050 buffer_info->length,
2054 buffer_info->dma = 0;
2055 if (buffer_info->page) {
2056 put_page(buffer_info->page);
2057 buffer_info->page = NULL;
2059 if (buffer_info->skb) {
2060 dev_kfree_skb(buffer_info->skb);
2061 buffer_info->skb = NULL;
2065 /* there also may be some cached data from a chained receive */
2066 if (rx_ring->rx_skb_top) {
2067 dev_kfree_skb(rx_ring->rx_skb_top);
2068 rx_ring->rx_skb_top = NULL;
2071 size = sizeof(struct e1000_buffer) * rx_ring->count;
2072 memset(rx_ring->buffer_info, 0, size);
2074 /* Zero out the descriptor ring */
2075 memset(rx_ring->desc, 0, rx_ring->size);
2077 rx_ring->next_to_clean = 0;
2078 rx_ring->next_to_use = 0;
2080 writel(0, hw->hw_addr + rx_ring->rdh);
2081 writel(0, hw->hw_addr + rx_ring->rdt);
2085 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2086 * @adapter: board private structure
2089 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2093 for (i = 0; i < adapter->num_rx_queues; i++)
2094 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2097 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2098 * and memory write and invalidate disabled for certain operations
2100 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2102 struct e1000_hw *hw = &adapter->hw;
2103 struct net_device *netdev = adapter->netdev;
2106 e1000_pci_clear_mwi(hw);
2109 rctl |= E1000_RCTL_RST;
2111 E1000_WRITE_FLUSH();
2114 if (netif_running(netdev))
2115 e1000_clean_all_rx_rings(adapter);
2118 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2120 struct e1000_hw *hw = &adapter->hw;
2121 struct net_device *netdev = adapter->netdev;
2125 rctl &= ~E1000_RCTL_RST;
2127 E1000_WRITE_FLUSH();
2130 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2131 e1000_pci_set_mwi(hw);
2133 if (netif_running(netdev)) {
2134 /* No need to loop, because 82542 supports only 1 queue */
2135 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2136 e1000_configure_rx(adapter);
2137 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2142 * e1000_set_mac - Change the Ethernet Address of the NIC
2143 * @netdev: network interface device structure
2144 * @p: pointer to an address structure
2146 * Returns 0 on success, negative on failure
2149 static int e1000_set_mac(struct net_device *netdev, void *p)
2151 struct e1000_adapter *adapter = netdev_priv(netdev);
2152 struct e1000_hw *hw = &adapter->hw;
2153 struct sockaddr *addr = p;
2155 if (!is_valid_ether_addr(addr->sa_data))
2156 return -EADDRNOTAVAIL;
2158 /* 82542 2.0 needs to be in reset to write receive address registers */
2160 if (hw->mac_type == e1000_82542_rev2_0)
2161 e1000_enter_82542_rst(adapter);
2163 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2164 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2166 e1000_rar_set(hw, hw->mac_addr, 0);
2168 if (hw->mac_type == e1000_82542_rev2_0)
2169 e1000_leave_82542_rst(adapter);
2175 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2176 * @netdev: network interface device structure
2178 * The set_rx_mode entry point is called whenever the unicast or multicast
2179 * address lists or the network interface flags are updated. This routine is
2180 * responsible for configuring the hardware for proper unicast, multicast,
2181 * promiscuous mode, and all-multi behavior.
2184 static void e1000_set_rx_mode(struct net_device *netdev)
2186 struct e1000_adapter *adapter = netdev_priv(netdev);
2187 struct e1000_hw *hw = &adapter->hw;
2188 struct netdev_hw_addr *ha;
2189 bool use_uc = false;
2192 int i, rar_entries = E1000_RAR_ENTRIES;
2193 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2194 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2197 e_err(probe, "memory allocation failed\n");
2201 /* Check for Promiscuous and All Multicast modes */
2205 if (netdev->flags & IFF_PROMISC) {
2206 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2207 rctl &= ~E1000_RCTL_VFE;
2209 if (netdev->flags & IFF_ALLMULTI)
2210 rctl |= E1000_RCTL_MPE;
2212 rctl &= ~E1000_RCTL_MPE;
2213 /* Enable VLAN filter if there is a VLAN */
2215 rctl |= E1000_RCTL_VFE;
2218 if (netdev_uc_count(netdev) > rar_entries - 1) {
2219 rctl |= E1000_RCTL_UPE;
2220 } else if (!(netdev->flags & IFF_PROMISC)) {
2221 rctl &= ~E1000_RCTL_UPE;
2227 /* 82542 2.0 needs to be in reset to write receive address registers */
2229 if (hw->mac_type == e1000_82542_rev2_0)
2230 e1000_enter_82542_rst(adapter);
/* load the first 14 addresses into the exact filters 1-14. Unicast
 * addresses take precedence to avoid disabling unicast filtering.
 *
 * RAR 0 is used for the station MAC address
 * if there are not 14 addresses, go ahead and clear the filters
 */
2241 netdev_for_each_uc_addr(ha, netdev) {
2242 if (i == rar_entries)
2244 e1000_rar_set(hw, ha->addr, i++);
2247 netdev_for_each_mc_addr(ha, netdev) {
2248 if (i == rar_entries) {
2249 /* load any remaining addresses into the hash table */
2250 u32 hash_reg, hash_bit, mta;
2251 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2252 hash_reg = (hash_value >> 5) & 0x7F;
2253 hash_bit = hash_value & 0x1F;
2254 mta = (1 << hash_bit);
2255 mcarray[hash_reg] |= mta;
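/* The multicast hash selects a single bit in the Multicast Table
 * Array: bits 11:5 of the hash pick one of the 128 32-bit MTA
 * registers and bits 4:0 pick the bit within it. The bits are only
 * accumulated in mcarray here; the whole array is written to the
 * hardware in one backwards pass further down. */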
2257 e1000_rar_set(hw, ha->addr, i++);
2261 for (; i < rar_entries; i++) {
2262 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2263 E1000_WRITE_FLUSH();
2264 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2265 E1000_WRITE_FLUSH();
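/* Each receive address entry is a pair of 32-bit registers (address
 * low, then address high with the valid bit), hence the i << 1 and
 * (i << 1) + 1 offsets used to clear the unused entries. */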
2268 /* write the hash table completely, write from bottom to avoid
2269 * both stupid write combining chipsets, and flushing each write */
2270 for (i = mta_reg_count - 1; i >= 0 ; i--) {
/* If we are on an 82544, there is an errata where writing odd
 * offsets overwrites the previous even offset, but writing
 * backwards over the range solves the issue by always
 * writing the odd offset first
 */
2277 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2279 E1000_WRITE_FLUSH();
2281 if (hw->mac_type == e1000_82542_rev2_0)
2282 e1000_leave_82542_rst(adapter);
/* Need to wait a few seconds after link up to get diagnostic information
 * from the phy
 */
2290 static void e1000_update_phy_info(unsigned long data)
2292 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2293 schedule_work(&adapter->phy_info_task);
2296 static void e1000_update_phy_info_task(struct work_struct *work)
2298 struct e1000_adapter *adapter = container_of(work,
2299 struct e1000_adapter,
2301 struct e1000_hw *hw = &adapter->hw;
2304 e1000_phy_get_info(hw, &adapter->phy_info);
2309 * e1000_82547_tx_fifo_stall - Timer Call-back
2310 * @data: pointer to adapter cast into an unsigned long
2312 static void e1000_82547_tx_fifo_stall(unsigned long data)
2314 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2315 schedule_work(&adapter->fifo_stall_task);
2319 * e1000_82547_tx_fifo_stall_task - task to complete work
2320 * @work: work struct contained inside adapter struct
2322 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2324 struct e1000_adapter *adapter = container_of(work,
2325 struct e1000_adapter,
2327 struct e1000_hw *hw = &adapter->hw;
2328 struct net_device *netdev = adapter->netdev;
2332 if (atomic_read(&adapter->tx_fifo_stall)) {
2333 if ((er32(TDT) == er32(TDH)) &&
2334 (er32(TDFT) == er32(TDFH)) &&
2335 (er32(TDFTS) == er32(TDFHS))) {
2337 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2338 ew32(TDFT, adapter->tx_head_addr);
2339 ew32(TDFH, adapter->tx_head_addr);
2340 ew32(TDFTS, adapter->tx_head_addr);
2341 ew32(TDFHS, adapter->tx_head_addr);
2343 E1000_WRITE_FLUSH();
2345 adapter->tx_fifo_head = 0;
2346 atomic_set(&adapter->tx_fifo_stall, 0);
2347 netif_wake_queue(netdev);
2348 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2349 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2355 bool e1000_has_link(struct e1000_adapter *adapter)
2357 struct e1000_hw *hw = &adapter->hw;
2358 bool link_active = false;
2360 /* get_link_status is set on LSC (link status) interrupt or
2361 * rx sequence error interrupt. get_link_status will stay
2362 * false until the e1000_check_for_link establishes link
 * for copper adapters ONLY
 */
2365 switch (hw->media_type) {
2366 case e1000_media_type_copper:
2367 if (hw->get_link_status) {
2368 e1000_check_for_link(hw);
2369 link_active = !hw->get_link_status;
2374 case e1000_media_type_fiber:
2375 e1000_check_for_link(hw);
2376 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2378 case e1000_media_type_internal_serdes:
2379 e1000_check_for_link(hw);
2380 link_active = hw->serdes_has_link;
2390 * e1000_watchdog - Timer Call-back
2391 * @data: pointer to adapter cast into an unsigned long
2393 static void e1000_watchdog(unsigned long data)
2395 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2396 struct e1000_hw *hw = &adapter->hw;
2397 struct net_device *netdev = adapter->netdev;
2398 struct e1000_tx_ring *txdr = adapter->tx_ring;
2401 link = e1000_has_link(adapter);
2402 if ((netif_carrier_ok(netdev)) && link)
2406 if (!netif_carrier_ok(netdev)) {
2409 /* update snapshot of PHY registers on LSC */
2410 e1000_get_speed_and_duplex(hw,
2411 &adapter->link_speed,
2412 &adapter->link_duplex);
2415 pr_info("%s NIC Link is Up %d Mbps %s, "
2416 "Flow Control: %s\n",
2418 adapter->link_speed,
2419 adapter->link_duplex == FULL_DUPLEX ?
2420 "Full Duplex" : "Half Duplex",
2421 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2422 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2423 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2424 E1000_CTRL_TFCE) ? "TX" : "None")));
2426 /* adjust timeout factor according to speed/duplex */
2427 adapter->tx_timeout_factor = 1;
2428 switch (adapter->link_speed) {
2431 adapter->tx_timeout_factor = 16;
2435 /* maybe add some timeout factor ? */
2439 /* enable transmits in the hardware */
2441 tctl |= E1000_TCTL_EN;
2444 netif_carrier_on(netdev);
2445 if (!test_bit(__E1000_DOWN, &adapter->flags))
2446 mod_timer(&adapter->phy_info_timer,
2447 round_jiffies(jiffies + 2 * HZ));
2448 adapter->smartspeed = 0;
2451 if (netif_carrier_ok(netdev)) {
2452 adapter->link_speed = 0;
2453 adapter->link_duplex = 0;
2454 pr_info("%s NIC Link is Down\n",
2456 netif_carrier_off(netdev);
2458 if (!test_bit(__E1000_DOWN, &adapter->flags))
2459 mod_timer(&adapter->phy_info_timer,
2460 round_jiffies(jiffies + 2 * HZ));
2463 e1000_smartspeed(adapter);
2467 e1000_update_stats(adapter);
2469 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2470 adapter->tpt_old = adapter->stats.tpt;
2471 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2472 adapter->colc_old = adapter->stats.colc;
2474 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2475 adapter->gorcl_old = adapter->stats.gorcl;
2476 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2477 adapter->gotcl_old = adapter->stats.gotcl;
2479 e1000_update_adaptive(hw);
2481 if (!netif_carrier_ok(netdev)) {
2482 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2483 /* We've lost link, so the controller stops DMA,
2484 * but we've got queued Tx work that's never going
2485 * to get done, so reset controller to flush Tx.
2486 * (Do the reset outside of interrupt context). */
2487 adapter->tx_timeout_count++;
2488 schedule_work(&adapter->reset_task);
2489 /* return immediately since reset is imminent */
2494 /* Simple mode for Interrupt Throttle Rate (ITR) */
2495 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
/* Symmetric Tx/Rx gets a reduced ITR=2000;
 * Total asymmetrical Tx or Rx gets ITR=8000;
 * everyone else is between 2000-8000.
 */
2501 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2502 u32 dif = (adapter->gotcl > adapter->gorcl ?
2503 adapter->gotcl - adapter->gorcl :
2504 adapter->gorcl - adapter->gotcl) / 10000;
2505 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2507 ew32(ITR, 1000000000 / (itr * 256));
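/* itr above is in interrupts/second; the ITR register takes an
 * interval in 256 ns units, hence the 10^9 / (itr * 256) conversion.
 * Illustrative example (numbers not from the source): perfectly
 * symmetric traffic gives dif == 0 and itr == 2000 ints/s, which
 * programs roughly 1953 * 256 ns, i.e. ~500 us between interrupts. */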
2510 /* Cause software interrupt to ensure rx ring is cleaned */
2511 ew32(ICS, E1000_ICS_RXDMT0);
2513 /* Force detection of hung controller every watchdog period */
2514 adapter->detect_tx_hung = true;
2516 /* Reset the timer */
2517 if (!test_bit(__E1000_DOWN, &adapter->flags))
2518 mod_timer(&adapter->watchdog_timer,
2519 round_jiffies(jiffies + 2 * HZ));
2522 enum latency_range {
2526 latency_invalid = 255
2530 * e1000_update_itr - update the dynamic ITR value based on statistics
2531 * @adapter: pointer to adapter
2532 * @itr_setting: current adapter->itr
2533 * @packets: the number of packets during this measurement interval
2534 * @bytes: the number of bytes during this measurement interval
2536 * Stores a new ITR value based on packets and byte
2537 * counts during the last interrupt. The advantage of per interrupt
2538 * computation is faster updates and more accurate ITR for the current
2539 * traffic pattern. Constants in this function were computed
2540 * based on theoretical maximum wire speed and thresholds were set based
2541 * on testing data as well as attempting to minimize response time
2542 * while increasing bulk throughput.
2543 * this functionality is controlled by the InterruptThrottleRate module
2544 * parameter (see e1000_param.c)
2546 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2547 u16 itr_setting, int packets, int bytes)
2549 unsigned int retval = itr_setting;
2550 struct e1000_hw *hw = &adapter->hw;
2552 if (unlikely(hw->mac_type < e1000_82540))
2553 goto update_itr_done;
2556 goto update_itr_done;
2558 switch (itr_setting) {
2559 case lowest_latency:
/* jumbo frames get bulk treatment */
2561 if (bytes/packets > 8000)
2562 retval = bulk_latency;
2563 else if ((packets < 5) && (bytes > 512))
2564 retval = low_latency;
2566 case low_latency: /* 50 usec aka 20000 ints/s */
2567 if (bytes > 10000) {
2568 /* jumbo frames need bulk latency setting */
2569 if (bytes/packets > 8000)
2570 retval = bulk_latency;
2571 else if ((packets < 10) || ((bytes/packets) > 1200))
2572 retval = bulk_latency;
2573 else if ((packets > 35))
2574 retval = lowest_latency;
2575 } else if (bytes/packets > 2000)
2576 retval = bulk_latency;
2577 else if (packets <= 2 && bytes < 512)
2578 retval = lowest_latency;
2580 case bulk_latency: /* 250 usec aka 4000 ints/s */
2581 if (bytes > 25000) {
2583 retval = low_latency;
2584 } else if (bytes < 6000) {
2585 retval = low_latency;
2594 static void e1000_set_itr(struct e1000_adapter *adapter)
2596 struct e1000_hw *hw = &adapter->hw;
2598 u32 new_itr = adapter->itr;
2600 if (unlikely(hw->mac_type < e1000_82540))
2603 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2604 if (unlikely(adapter->link_speed != SPEED_1000)) {
2610 adapter->tx_itr = e1000_update_itr(adapter,
2612 adapter->total_tx_packets,
2613 adapter->total_tx_bytes);
2614 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2615 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2616 adapter->tx_itr = low_latency;
2618 adapter->rx_itr = e1000_update_itr(adapter,
2620 adapter->total_rx_packets,
2621 adapter->total_rx_bytes);
2622 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2623 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2624 adapter->rx_itr = low_latency;
2626 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2628 switch (current_itr) {
2629 /* counts and packets in update_itr are dependent on these numbers */
2630 case lowest_latency:
2634 new_itr = 20000; /* aka hwitr = ~200 */
2644 if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk
 * by adding intermediate steps when interrupt rate is
 * increasing
 */
2648 new_itr = new_itr > adapter->itr ?
2649 min(adapter->itr + (new_itr >> 2), new_itr) :
2651 adapter->itr = new_itr;
2652 ew32(ITR, 1000000000 / (new_itr * 256));
2656 #define E1000_TX_FLAGS_CSUM 0x00000001
2657 #define E1000_TX_FLAGS_VLAN 0x00000002
2658 #define E1000_TX_FLAGS_TSO 0x00000004
2659 #define E1000_TX_FLAGS_IPV4 0x00000008
2660 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2661 #define E1000_TX_FLAGS_VLAN_SHIFT 16
2663 static int e1000_tso(struct e1000_adapter *adapter,
2664 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2666 struct e1000_context_desc *context_desc;
2667 struct e1000_buffer *buffer_info;
2670 u16 ipcse = 0, tucse, mss;
2671 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2674 if (skb_is_gso(skb)) {
2675 if (skb_header_cloned(skb)) {
2676 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2681 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2682 mss = skb_shinfo(skb)->gso_size;
2683 if (skb->protocol == htons(ETH_P_IP)) {
2684 struct iphdr *iph = ip_hdr(skb);
2687 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2691 cmd_length = E1000_TXD_CMD_IP;
2692 ipcse = skb_transport_offset(skb) - 1;
2693 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2694 ipv6_hdr(skb)->payload_len = 0;
2695 tcp_hdr(skb)->check =
2696 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2697 &ipv6_hdr(skb)->daddr,
2701 ipcss = skb_network_offset(skb);
2702 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2703 tucss = skb_transport_offset(skb);
2704 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
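/* The *css/*cso/*cse values follow the context descriptor naming:
 * checksum start, checksum offset and checksum end, given as byte
 * offsets from the start of the packet, for the IP (ipc*) and
 * TCP (tuc*) headers. The hardware uses them to fix up the header
 * checksums of every segment it generates. */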
2707 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2708 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2710 i = tx_ring->next_to_use;
2711 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2712 buffer_info = &tx_ring->buffer_info[i];
2714 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2715 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2716 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2717 context_desc->upper_setup.tcp_fields.tucss = tucss;
2718 context_desc->upper_setup.tcp_fields.tucso = tucso;
2719 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2720 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2721 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2722 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2724 buffer_info->time_stamp = jiffies;
2725 buffer_info->next_to_watch = i;
2727 if (++i == tx_ring->count) i = 0;
2728 tx_ring->next_to_use = i;
2735 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2736 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2738 struct e1000_context_desc *context_desc;
2739 struct e1000_buffer *buffer_info;
2742 u32 cmd_len = E1000_TXD_CMD_DEXT;
2744 if (skb->ip_summed != CHECKSUM_PARTIAL)
2747 switch (skb->protocol) {
2748 case cpu_to_be16(ETH_P_IP):
2749 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2750 cmd_len |= E1000_TXD_CMD_TCP;
2752 case cpu_to_be16(ETH_P_IPV6):
2753 /* XXX not handling all IPV6 headers */
2754 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2755 cmd_len |= E1000_TXD_CMD_TCP;
2758 if (unlikely(net_ratelimit()))
2759 e_warn(drv, "checksum_partial proto=%x!\n",
2764 css = skb_checksum_start_offset(skb);
2766 i = tx_ring->next_to_use;
2767 buffer_info = &tx_ring->buffer_info[i];
2768 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2770 context_desc->lower_setup.ip_config = 0;
2771 context_desc->upper_setup.tcp_fields.tucss = css;
2772 context_desc->upper_setup.tcp_fields.tucso =
2773 css + skb->csum_offset;
2774 context_desc->upper_setup.tcp_fields.tucse = 0;
2775 context_desc->tcp_seg_setup.data = 0;
2776 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2778 buffer_info->time_stamp = jiffies;
2779 buffer_info->next_to_watch = i;
2781 if (unlikely(++i == tx_ring->count)) i = 0;
2782 tx_ring->next_to_use = i;
2787 #define E1000_MAX_TXD_PWR 12
2788 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
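/* A single data descriptor carries at most 2^12 = 4096 bytes, so
 * e1000_tx_map() splits larger buffers (the skb head or individual
 * frags) across multiple descriptors in max_per_txd sized chunks. */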
2790 static int e1000_tx_map(struct e1000_adapter *adapter,
2791 struct e1000_tx_ring *tx_ring,
2792 struct sk_buff *skb, unsigned int first,
2793 unsigned int max_per_txd, unsigned int nr_frags,
2796 struct e1000_hw *hw = &adapter->hw;
2797 struct pci_dev *pdev = adapter->pdev;
2798 struct e1000_buffer *buffer_info;
2799 unsigned int len = skb_headlen(skb);
2800 unsigned int offset = 0, size, count = 0, i;
2803 i = tx_ring->next_to_use;
2806 buffer_info = &tx_ring->buffer_info[i];
2807 size = min(len, max_per_txd);
2808 /* Workaround for Controller erratum --
2809 * descriptor for non-tso packet in a linear SKB that follows a
2810 * tso gets written back prematurely before the data is fully
2811 * DMA'd to the controller */
2812 if (!skb->data_len && tx_ring->last_tx_tso &&
2814 tx_ring->last_tx_tso = 0;
2818 /* Workaround for premature desc write-backs
2819 * in TSO mode. Append 4-byte sentinel desc */
2820 if (unlikely(mss && !nr_frags && size == len && size > 8))
/* Workaround for errata 10, which applies to all
 * controllers in PCI-X mode.
 * The fix is to make sure that the first descriptor of a
 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
 */
2827 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2828 (size > 2015) && count == 0))
2831 /* Workaround for potential 82544 hang in PCI-X. Avoid
2832 * terminating buffers within evenly-aligned dwords. */
2833 if (unlikely(adapter->pcix_82544 &&
2834 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2838 buffer_info->length = size;
2839 /* set time_stamp *before* dma to help avoid a possible race */
2840 buffer_info->time_stamp = jiffies;
2841 buffer_info->mapped_as_page = false;
2842 buffer_info->dma = dma_map_single(&pdev->dev,
2844 size, DMA_TO_DEVICE);
2845 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2847 buffer_info->next_to_watch = i;
2854 if (unlikely(i == tx_ring->count))
2859 for (f = 0; f < nr_frags; f++) {
2860 struct skb_frag_struct *frag;
2862 frag = &skb_shinfo(skb)->frags[f];
2864 offset = frag->page_offset;
2868 if (unlikely(i == tx_ring->count))
2871 buffer_info = &tx_ring->buffer_info[i];
2872 size = min(len, max_per_txd);
2873 /* Workaround for premature desc write-backs
2874 * in TSO mode. Append 4-byte sentinel desc */
2875 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
/* Workaround for potential 82544 hang in PCI-X.
 * Avoid terminating buffers within evenly-aligned
 * dwords.
 */
2880 if (unlikely(adapter->pcix_82544 &&
2881 !((unsigned long)(page_to_phys(frag->page) + offset
2886 buffer_info->length = size;
2887 buffer_info->time_stamp = jiffies;
2888 buffer_info->mapped_as_page = true;
2889 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
2892 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2894 buffer_info->next_to_watch = i;
2902 tx_ring->buffer_info[i].skb = skb;
2903 tx_ring->buffer_info[first].next_to_watch = i;
2908 dev_err(&pdev->dev, "TX DMA map failed\n");
2909 buffer_info->dma = 0;
2915 i += tx_ring->count;
2917 buffer_info = &tx_ring->buffer_info[i];
2918 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2924 static void e1000_tx_queue(struct e1000_adapter *adapter,
2925 struct e1000_tx_ring *tx_ring, int tx_flags,
2928 struct e1000_hw *hw = &adapter->hw;
2929 struct e1000_tx_desc *tx_desc = NULL;
2930 struct e1000_buffer *buffer_info;
2931 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
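/* IFCS asks the hardware to append the Ethernet CRC. The flag tests
 * below add DEXT (extended data descriptor format) plus TSE for TSO,
 * the TXSM/IXSM popts bits to request TCP and IP checksum insertion,
 * and VLE with the VLAN tag in the upper 16 bits of txd_upper when
 * a tag is present. */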
2934 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2935 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2937 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2939 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2940 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2943 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2944 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2945 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2948 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2949 txd_lower |= E1000_TXD_CMD_VLE;
2950 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2953 i = tx_ring->next_to_use;
2956 buffer_info = &tx_ring->buffer_info[i];
2957 tx_desc = E1000_TX_DESC(*tx_ring, i);
2958 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2959 tx_desc->lower.data =
2960 cpu_to_le32(txd_lower | buffer_info->length);
2961 tx_desc->upper.data = cpu_to_le32(txd_upper);
2962 if (unlikely(++i == tx_ring->count)) i = 0;
2965 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2967 /* Force memory writes to complete before letting h/w
2968 * know there are new descriptors to fetch. (Only
2969 * applicable for weak-ordered memory model archs,
2970 * such as IA-64). */
2973 tx_ring->next_to_use = i;
2974 writel(i, hw->hw_addr + tx_ring->tdt);
2975 /* we need this if more than one processor can write to our tail
 * at a time; it synchronizes IO on IA64/Altix systems */
/* 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time. This gives the Tx FIFO an opportunity to
 * flush all packets. When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 */
2989 #define E1000_FIFO_HDR 0x10
2990 #define E1000_82547_PAD_LEN 0x3E0
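/* A queued packet consumes its length plus a 16-byte FIFO header,
 * rounded up to a 16-byte multiple, in the on-chip Tx FIFO;
 * E1000_82547_PAD_LEN (992 bytes) is the slack used in the
 * FIFO-wrap check below. */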
2992 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
2993 struct sk_buff *skb)
2995 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2996 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
2998 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3000 if (adapter->link_duplex != HALF_DUPLEX)
3001 goto no_fifo_stall_required;
3003 if (atomic_read(&adapter->tx_fifo_stall))
3006 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3007 atomic_set(&adapter->tx_fifo_stall, 1);
3011 no_fifo_stall_required:
3012 adapter->tx_fifo_head += skb_fifo_len;
3013 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3014 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3018 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3020 struct e1000_adapter *adapter = netdev_priv(netdev);
3021 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3023 netif_stop_queue(netdev);
3024 /* Herbert's original patch had:
3025 * smp_mb__after_netif_stop_queue();
3026 * but since that doesn't exist yet, just open code it. */
/* We need to check again in case another CPU has just
 * made room available. */
3031 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3035 netif_start_queue(netdev);
3036 ++adapter->restart_queue;
3040 static int e1000_maybe_stop_tx(struct net_device *netdev,
3041 struct e1000_tx_ring *tx_ring, int size)
3043 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3045 return __e1000_maybe_stop_tx(netdev, size);
3048 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
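/* Conservative estimate of how many descriptors a buffer of S bytes
 * needs when each descriptor carries at most 2^X bytes: always at
 * least one, e.g. TXD_USE_COUNT(6000, 12) == 2. */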
3049 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3050 struct net_device *netdev)
3052 struct e1000_adapter *adapter = netdev_priv(netdev);
3053 struct e1000_hw *hw = &adapter->hw;
3054 struct e1000_tx_ring *tx_ring;
3055 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3056 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3057 unsigned int tx_flags = 0;
3058 unsigned int len = skb_headlen(skb);
3059 unsigned int nr_frags;
3065 /* This goes back to the question of how to logically map a tx queue
3066 * to a flow. Right now, performance is impacted slightly negatively
3067 * if using multiple tx queues. If the stack breaks away from a
3068 * single qdisc implementation, we can look at this again. */
3069 tx_ring = adapter->tx_ring;
3071 if (unlikely(skb->len <= 0)) {
3072 dev_kfree_skb_any(skb);
3073 return NETDEV_TX_OK;
3076 mss = skb_shinfo(skb)->gso_size;
/* The controller does a simple calculation to
 * make sure there is enough room in the FIFO before
 * initiating the DMA for each buffer. The calc is:
 * 4 = ceil(buffer len/mss). To make sure we don't
 * overrun the FIFO, adjust the max buffer len if mss
 * drops.
 */
3085 max_per_txd = min(mss << 2, max_per_txd);
3086 max_txd_pwr = fls(max_per_txd) - 1;
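/* Capping a descriptor at 4 * mss keeps ceil(buffer len / mss) at 4
 * or below, which is what the FIFO-room calculation above assumes;
 * max_txd_pwr is recomputed so TXD_USE_COUNT stays consistent with
 * the reduced per-descriptor limit. */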
3088 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3089 if (skb->data_len && hdr_len == len) {
3090 switch (hw->mac_type) {
3091 unsigned int pull_size;
3093 /* Make sure we have room to chop off 4 bytes,
3094 * and that the end alignment will work out to
3095 * this hardware's requirements
3096 * NOTE: this is a TSO only workaround
3097 * if end byte alignment not correct move us
3098 * into the next dword */
3099 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3102 pull_size = min((unsigned int)4, skb->data_len);
3103 if (!__pskb_pull_tail(skb, pull_size)) {
3104 e_err(drv, "__pskb_pull_tail "
3106 dev_kfree_skb_any(skb);
3107 return NETDEV_TX_OK;
3109 len = skb_headlen(skb);
3118 /* reserve a descriptor for the offload context */
3119 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3123 /* Controller Erratum workaround */
3124 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3127 count += TXD_USE_COUNT(len, max_txd_pwr);
3129 if (adapter->pcix_82544)
/* Workaround for errata 10, which applies to all controllers
 * in PCI-X mode, so add one more descriptor to the count
 */
3135 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3139 nr_frags = skb_shinfo(skb)->nr_frags;
3140 for (f = 0; f < nr_frags; f++)
3141 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3143 if (adapter->pcix_82544)
3146 /* need: count + 2 desc gap to keep tail from touching
3147 * head, otherwise try next time */
3148 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3149 return NETDEV_TX_BUSY;
3151 if (unlikely(hw->mac_type == e1000_82547)) {
3152 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3153 netif_stop_queue(netdev);
3154 if (!test_bit(__E1000_DOWN, &adapter->flags))
3155 mod_timer(&adapter->tx_fifo_stall_timer,
3157 return NETDEV_TX_BUSY;
3161 if (unlikely(vlan_tx_tag_present(skb))) {
3162 tx_flags |= E1000_TX_FLAGS_VLAN;
3163 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3166 first = tx_ring->next_to_use;
3168 tso = e1000_tso(adapter, tx_ring, skb);
3170 dev_kfree_skb_any(skb);
3171 return NETDEV_TX_OK;
3175 if (likely(hw->mac_type != e1000_82544))
3176 tx_ring->last_tx_tso = 1;
3177 tx_flags |= E1000_TX_FLAGS_TSO;
3178 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3179 tx_flags |= E1000_TX_FLAGS_CSUM;
3181 if (likely(skb->protocol == htons(ETH_P_IP)))
3182 tx_flags |= E1000_TX_FLAGS_IPV4;
3184 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3188 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3189 /* Make sure there is space in the ring for the next send. */
3190 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3193 dev_kfree_skb_any(skb);
3194 tx_ring->buffer_info[first].time_stamp = 0;
3195 tx_ring->next_to_use = first;
3198 return NETDEV_TX_OK;
3202 * e1000_tx_timeout - Respond to a Tx Hang
3203 * @netdev: network interface device structure
3206 static void e1000_tx_timeout(struct net_device *netdev)
3208 struct e1000_adapter *adapter = netdev_priv(netdev);
3210 /* Do the reset outside of interrupt context */
3211 adapter->tx_timeout_count++;
3212 schedule_work(&adapter->reset_task);
3215 static void e1000_reset_task(struct work_struct *work)
3217 struct e1000_adapter *adapter =
3218 container_of(work, struct e1000_adapter, reset_task);
3220 e1000_reinit_safe(adapter);
3224 * e1000_get_stats - Get System Network Statistics
3225 * @netdev: network interface device structure
3227 * Returns the address of the device statistics structure.
3228 * The statistics are actually updated from the timer callback.
3231 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3233 /* only return the current stats */
3234 return &netdev->stats;
3238 * e1000_change_mtu - Change the Maximum Transfer Unit
3239 * @netdev: network interface device structure
3240 * @new_mtu: new value for maximum frame size
3242 * Returns 0 on success, negative on failure
3245 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3247 struct e1000_adapter *adapter = netdev_priv(netdev);
3248 struct e1000_hw *hw = &adapter->hw;
3249 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3251 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3252 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3253 e_err(probe, "Invalid MTU setting\n");
3257 /* Adapter-specific max frame size limits. */
3258 switch (hw->mac_type) {
3259 case e1000_undefined ... e1000_82542_rev2_1:
3260 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3261 e_err(probe, "Jumbo Frames not supported.\n");
3266 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3270 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3272 /* e1000_down has a dependency on max_frame_size */
3273 hw->max_frame_size = max_frame;
3274 if (netif_running(netdev))
3275 e1000_down(adapter);
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 * means we reserve 2 more, this pushes us to allocate from the next
 * larger slab size.
 * i.e. RXBUFFER_2048 --> size-4096 slab
 * however with the new *_jumbo_rx* routines, jumbo receives will use
 * fragmented skbs */
3284 if (max_frame <= E1000_RXBUFFER_2048)
3285 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3287 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3288 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3289 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3290 adapter->rx_buffer_len = PAGE_SIZE;
3293 /* adjust allocation if LPE protects us, and we aren't using SBP */
3294 if (!hw->tbi_compatibility_on &&
3295 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3296 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3297 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3299 pr_info("%s changing MTU from %d to %d\n",
3300 netdev->name, netdev->mtu, new_mtu);
3301 netdev->mtu = new_mtu;
3303 if (netif_running(netdev))
3306 e1000_reset(adapter);
3308 clear_bit(__E1000_RESETTING, &adapter->flags);
3314 * e1000_update_stats - Update the board statistics counters
3315 * @adapter: board private structure
3318 void e1000_update_stats(struct e1000_adapter *adapter)
3320 struct net_device *netdev = adapter->netdev;
3321 struct e1000_hw *hw = &adapter->hw;
3322 struct pci_dev *pdev = adapter->pdev;
3323 unsigned long flags;
3326 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
/* Prevent stats update while adapter is being reset, or if the pci
 * connection is down.
 */
3332 if (adapter->link_speed == 0)
3334 if (pci_channel_offline(pdev))
3337 spin_lock_irqsave(&adapter->stats_lock, flags);
/* these counters are modified from e1000_tbi_adjust_stats,
 * called from the interrupt context, so they must only
 * be written while holding adapter->stats_lock
 */
3344 adapter->stats.crcerrs += er32(CRCERRS);
3345 adapter->stats.gprc += er32(GPRC);
3346 adapter->stats.gorcl += er32(GORCL);
3347 adapter->stats.gorch += er32(GORCH);
3348 adapter->stats.bprc += er32(BPRC);
3349 adapter->stats.mprc += er32(MPRC);
3350 adapter->stats.roc += er32(ROC);
3352 adapter->stats.prc64 += er32(PRC64);
3353 adapter->stats.prc127 += er32(PRC127);
3354 adapter->stats.prc255 += er32(PRC255);
3355 adapter->stats.prc511 += er32(PRC511);
3356 adapter->stats.prc1023 += er32(PRC1023);
3357 adapter->stats.prc1522 += er32(PRC1522);
3359 adapter->stats.symerrs += er32(SYMERRS);
3360 adapter->stats.mpc += er32(MPC);
3361 adapter->stats.scc += er32(SCC);
3362 adapter->stats.ecol += er32(ECOL);
3363 adapter->stats.mcc += er32(MCC);
3364 adapter->stats.latecol += er32(LATECOL);
3365 adapter->stats.dc += er32(DC);
3366 adapter->stats.sec += er32(SEC);
3367 adapter->stats.rlec += er32(RLEC);
3368 adapter->stats.xonrxc += er32(XONRXC);
3369 adapter->stats.xontxc += er32(XONTXC);
3370 adapter->stats.xoffrxc += er32(XOFFRXC);
3371 adapter->stats.xofftxc += er32(XOFFTXC);
3372 adapter->stats.fcruc += er32(FCRUC);
3373 adapter->stats.gptc += er32(GPTC);
3374 adapter->stats.gotcl += er32(GOTCL);
3375 adapter->stats.gotch += er32(GOTCH);
3376 adapter->stats.rnbc += er32(RNBC);
3377 adapter->stats.ruc += er32(RUC);
3378 adapter->stats.rfc += er32(RFC);
3379 adapter->stats.rjc += er32(RJC);
3380 adapter->stats.torl += er32(TORL);
3381 adapter->stats.torh += er32(TORH);
3382 adapter->stats.totl += er32(TOTL);
3383 adapter->stats.toth += er32(TOTH);
3384 adapter->stats.tpr += er32(TPR);
3386 adapter->stats.ptc64 += er32(PTC64);
3387 adapter->stats.ptc127 += er32(PTC127);
3388 adapter->stats.ptc255 += er32(PTC255);
3389 adapter->stats.ptc511 += er32(PTC511);
3390 adapter->stats.ptc1023 += er32(PTC1023);
3391 adapter->stats.ptc1522 += er32(PTC1522);
3393 adapter->stats.mptc += er32(MPTC);
3394 adapter->stats.bptc += er32(BPTC);
3396 /* used for adaptive IFS */
3398 hw->tx_packet_delta = er32(TPT);
3399 adapter->stats.tpt += hw->tx_packet_delta;
3400 hw->collision_delta = er32(COLC);
3401 adapter->stats.colc += hw->collision_delta;
3403 if (hw->mac_type >= e1000_82543) {
3404 adapter->stats.algnerrc += er32(ALGNERRC);
3405 adapter->stats.rxerrc += er32(RXERRC);
3406 adapter->stats.tncrs += er32(TNCRS);
3407 adapter->stats.cexterr += er32(CEXTERR);
3408 adapter->stats.tsctc += er32(TSCTC);
3409 adapter->stats.tsctfc += er32(TSCTFC);
3412 /* Fill out the OS statistics structure */
3413 netdev->stats.multicast = adapter->stats.mprc;
3414 netdev->stats.collisions = adapter->stats.colc;
3418 /* RLEC on some newer hardware can be incorrect so build
3419 * our own version based on RUC and ROC */
3420 netdev->stats.rx_errors = adapter->stats.rxerrc +
3421 adapter->stats.crcerrs + adapter->stats.algnerrc +
3422 adapter->stats.ruc + adapter->stats.roc +
3423 adapter->stats.cexterr;
3424 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3425 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3426 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3427 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3428 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3431 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3432 netdev->stats.tx_errors = adapter->stats.txerrc;
3433 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3434 netdev->stats.tx_window_errors = adapter->stats.latecol;
3435 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3436 if (hw->bad_tx_carr_stats_fd &&
3437 adapter->link_duplex == FULL_DUPLEX) {
3438 netdev->stats.tx_carrier_errors = 0;
3439 adapter->stats.tncrs = 0;
3442 /* Tx Dropped needs to be maintained elsewhere */
3445 if (hw->media_type == e1000_media_type_copper) {
3446 if ((adapter->link_speed == SPEED_1000) &&
3447 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3448 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3449 adapter->phy_stats.idle_errors += phy_tmp;
3452 if ((hw->mac_type <= e1000_82546) &&
3453 (hw->phy_type == e1000_phy_m88) &&
3454 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3455 adapter->phy_stats.receive_errors += phy_tmp;
3458 /* Management Stats */
3459 if (hw->has_smbus) {
3460 adapter->stats.mgptc += er32(MGTPTC);
3461 adapter->stats.mgprc += er32(MGTPRC);
3462 adapter->stats.mgpdc += er32(MGTPDC);
3465 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3469 * e1000_intr - Interrupt Handler
3470 * @irq: interrupt number
3471 * @data: pointer to a network interface device structure
3474 static irqreturn_t e1000_intr(int irq, void *data)
3476 struct net_device *netdev = data;
3477 struct e1000_adapter *adapter = netdev_priv(netdev);
3478 struct e1000_hw *hw = &adapter->hw;
3479 u32 icr = er32(ICR);
3481 if (unlikely((!icr)))
3482 return IRQ_NONE; /* Not our interrupt */
/* we might have caused the interrupt, but the above
 * read cleared it, and just in case the driver is
 * down there is nothing to do so return handled
 */
3489 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3492 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3493 hw->get_link_status = 1;
3494 /* guard against interrupt when we're going down */
3495 if (!test_bit(__E1000_DOWN, &adapter->flags))
3496 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3499 /* disable interrupts, without the synchronize_irq bit */
3501 E1000_WRITE_FLUSH();
3503 if (likely(napi_schedule_prep(&adapter->napi))) {
3504 adapter->total_tx_bytes = 0;
3505 adapter->total_tx_packets = 0;
3506 adapter->total_rx_bytes = 0;
3507 adapter->total_rx_packets = 0;
3508 __napi_schedule(&adapter->napi);
3510 /* this really should not happen! if it does it is basically a
3511 * bug, but not a hard error, so enable ints and continue */
3512 if (!test_bit(__E1000_DOWN, &adapter->flags))
3513 e1000_irq_enable(adapter);
3520 * e1000_clean - NAPI Rx polling callback
3521 * @adapter: board private structure
3523 static int e1000_clean(struct napi_struct *napi, int budget)
3525 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3526 int tx_clean_complete = 0, work_done = 0;
3528 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3530 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3532 if (!tx_clean_complete)
3535 /* If budget not fully consumed, exit the polling mode */
3536 if (work_done < budget) {
3537 if (likely(adapter->itr_setting & 3))
3538 e1000_set_itr(adapter);
3539 napi_complete(napi);
3540 if (!test_bit(__E1000_DOWN, &adapter->flags))
3541 e1000_irq_enable(adapter);
3548 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3549 * @adapter: board private structure
3551 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3552 struct e1000_tx_ring *tx_ring)
3554 struct e1000_hw *hw = &adapter->hw;
3555 struct net_device *netdev = adapter->netdev;
3556 struct e1000_tx_desc *tx_desc, *eop_desc;
3557 struct e1000_buffer *buffer_info;
3558 unsigned int i, eop;
3559 unsigned int count = 0;
3560 unsigned int total_tx_bytes=0, total_tx_packets=0;
3562 i = tx_ring->next_to_clean;
3563 eop = tx_ring->buffer_info[i].next_to_watch;
3564 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3566 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3567 (count < tx_ring->count)) {
3568 bool cleaned = false;
3569 rmb(); /* read buffer_info after eop_desc */
3570 for ( ; !cleaned; count++) {
3571 tx_desc = E1000_TX_DESC(*tx_ring, i);
3572 buffer_info = &tx_ring->buffer_info[i];
3573 cleaned = (i == eop);
3576 struct sk_buff *skb = buffer_info->skb;
3577 unsigned int segs, bytecount;
3578 segs = skb_shinfo(skb)->gso_segs ?: 1;
3579 /* multiply data chunks by size of headers */
3580 bytecount = ((segs - 1) * skb_headlen(skb)) +
3582 total_tx_packets += segs;
3583 total_tx_bytes += bytecount;
3585 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3586 tx_desc->upper.data = 0;
3588 if (unlikely(++i == tx_ring->count)) i = 0;
3591 eop = tx_ring->buffer_info[i].next_to_watch;
3592 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3595 tx_ring->next_to_clean = i;
3597 #define TX_WAKE_THRESHOLD 32
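/* Only restart the transmit queue once at least 32 descriptors are
 * free again; waking it the moment a single slot opens up would just
 * bounce the queue between stopped and started. */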
3598 if (unlikely(count && netif_carrier_ok(netdev) &&
3599 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
 * sees the new next_to_clean.
 */
3605 if (netif_queue_stopped(netdev) &&
3606 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3607 netif_wake_queue(netdev);
3608 ++adapter->restart_queue;
3612 if (adapter->detect_tx_hung) {
3613 /* Detect a transmit hang in hardware, this serializes the
3614 * check with the clearing of time_stamp and movement of i */
3615 adapter->detect_tx_hung = false;
3616 if (tx_ring->buffer_info[eop].time_stamp &&
3617 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3618 (adapter->tx_timeout_factor * HZ)) &&
3619 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3621 /* detected Tx unit hang */
3622 e_err(drv, "Detected Tx Unit Hang\n"
3626 " next_to_use <%x>\n"
3627 " next_to_clean <%x>\n"
3628 "buffer_info[next_to_clean]\n"
3629 " time_stamp <%lx>\n"
3630 " next_to_watch <%x>\n"
3632 " next_to_watch.status <%x>\n",
3633 (unsigned long)((tx_ring - adapter->tx_ring) /
3634 sizeof(struct e1000_tx_ring)),
3635 readl(hw->hw_addr + tx_ring->tdh),
3636 readl(hw->hw_addr + tx_ring->tdt),
3637 tx_ring->next_to_use,
3638 tx_ring->next_to_clean,
3639 tx_ring->buffer_info[eop].time_stamp,
3642 eop_desc->upper.fields.status);
3643 netif_stop_queue(netdev);
3646 adapter->total_tx_bytes += total_tx_bytes;
3647 adapter->total_tx_packets += total_tx_packets;
3648 netdev->stats.tx_bytes += total_tx_bytes;
3649 netdev->stats.tx_packets += total_tx_packets;
3650 return count < tx_ring->count;
3654 * e1000_rx_checksum - Receive Checksum Offload for 82543
3655 * @adapter: board private structure
3656 * @status_err: receive descriptor status and error fields
3657 * @csum: receive descriptor csum field
3658 * @sk_buff: socket buffer with received data
3661 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3662 u32 csum, struct sk_buff *skb)
3664 struct e1000_hw *hw = &adapter->hw;
3665 u16 status = (u16)status_err;
3666 u8 errors = (u8)(status_err >> 24);
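/* Callers pack the receive descriptor's status byte into the low bits
 * of status_err and its error byte into bits 31:24 (note the "<< 24"
 * at the call sites); the two casts above unpack them again. */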
3668 skb_checksum_none_assert(skb);
3670 /* 82543 or newer only */
3671 if (unlikely(hw->mac_type < e1000_82543)) return;
3672 /* Ignore Checksum bit is set */
3673 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3674 /* TCP/UDP checksum error bit is set */
3675 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3676 /* let the stack verify checksum errors */
3677 adapter->hw_csum_err++;
3680 /* TCP/UDP Checksum has not been calculated */
3681 if (!(status & E1000_RXD_STAT_TCPCS))
3684 /* It must be a TCP or UDP packet with a valid checksum */
3685 if (likely(status & E1000_RXD_STAT_TCPCS)) {
3686 /* TCP checksum is good */
3687 skb->ip_summed = CHECKSUM_UNNECESSARY;
3689 adapter->hw_csum_good++;
3693 * e1000_consume_page - helper function
3695 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3700 skb->data_len += length;
3701 skb->truesize += length;
3705 * e1000_receive_skb - helper function to handle rx indications
3706 * @adapter: board private structure
3707 * @status: descriptor status field as written by hardware
3708 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3709 * @skb: pointer to sk_buff to be indicated to stack
3711 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3712 __le16 vlan, struct sk_buff *skb)
3714 skb->protocol = eth_type_trans(skb, adapter->netdev);
3716 if ((unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))))
3717 vlan_gro_receive(&adapter->napi, adapter->vlgrp,
3718 le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK,
3721 napi_gro_receive(&adapter->napi, skb);
3725 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3726 * @adapter: board private structure
3727 * @rx_ring: ring to clean
3728 * @work_done: amount of napi work completed this call
3729 * @work_to_do: max amount of work allowed for this call to do
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
3734 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3735 struct e1000_rx_ring *rx_ring,
3736 int *work_done, int work_to_do)
3738 struct e1000_hw *hw = &adapter->hw;
3739 struct net_device *netdev = adapter->netdev;
3740 struct pci_dev *pdev = adapter->pdev;
3741 struct e1000_rx_desc *rx_desc, *next_rxd;
3742 struct e1000_buffer *buffer_info, *next_buffer;
3743 unsigned long irq_flags;
3746 int cleaned_count = 0;
3747 bool cleaned = false;
3748 unsigned int total_rx_bytes=0, total_rx_packets=0;
3750 i = rx_ring->next_to_clean;
3751 rx_desc = E1000_RX_DESC(*rx_ring, i);
3752 buffer_info = &rx_ring->buffer_info[i];
3754 while (rx_desc->status & E1000_RXD_STAT_DD) {
3755 struct sk_buff *skb;
3758 if (*work_done >= work_to_do)
3761 rmb(); /* read descriptor and rx_buffer_info after status DD */
3763 status = rx_desc->status;
3764 skb = buffer_info->skb;
3765 buffer_info->skb = NULL;
3767 if (++i == rx_ring->count) i = 0;
3768 next_rxd = E1000_RX_DESC(*rx_ring, i);
3771 next_buffer = &rx_ring->buffer_info[i];
3775 dma_unmap_page(&pdev->dev, buffer_info->dma,
3776 buffer_info->length, DMA_FROM_DEVICE);
3777 buffer_info->dma = 0;
3779 length = le16_to_cpu(rx_desc->length);
3781 /* errors is only valid for DD + EOP descriptors */
3782 if (unlikely((status & E1000_RXD_STAT_EOP) &&
3783 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
3784 u8 last_byte = *(skb->data + length - 1);
3785 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
3787 spin_lock_irqsave(&adapter->stats_lock,
3789 e1000_tbi_adjust_stats(hw, &adapter->stats,
3791 spin_unlock_irqrestore(&adapter->stats_lock,
3795 /* recycle both page and skb */
3796 buffer_info->skb = skb;
/* an error means any chain goes out the window
 * too */
3799 if (rx_ring->rx_skb_top)
3800 dev_kfree_skb(rx_ring->rx_skb_top);
3801 rx_ring->rx_skb_top = NULL;
3806 #define rxtop rx_ring->rx_skb_top
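/* Jumbo frames arrive as a chain of page-sized buffers. rxtop holds
 * the in-progress skb across descriptors: non-EOP buffers are attached
 * to it as page fragments, and the chain is only handed to the stack
 * once the descriptor with EOP set has been processed. */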
3807 if (!(status & E1000_RXD_STAT_EOP)) {
3808 /* this descriptor is only the beginning (or middle) */
3810 /* this is the beginning of a chain */
3812 skb_fill_page_desc(rxtop, 0, buffer_info->page,
3815 /* this is the middle of a chain */
3816 skb_fill_page_desc(rxtop,
3817 skb_shinfo(rxtop)->nr_frags,
3818 buffer_info->page, 0, length);
3819 /* re-use the skb, only consumed the page */
3820 buffer_info->skb = skb;
3822 e1000_consume_page(buffer_info, rxtop, length);
3826 /* end of the chain */
3827 skb_fill_page_desc(rxtop,
3828 skb_shinfo(rxtop)->nr_frags,
3829 buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
 * page */
3832 buffer_info->skb = skb;
3835 e1000_consume_page(buffer_info, skb, length);
/* no chain, got EOP, this buf is the packet;
 * copybreak to save the put_page/alloc_page */
3839 if (length <= copybreak &&
3840 skb_tailroom(skb) >= length) {
3842 vaddr = kmap_atomic(buffer_info->page,
3843 KM_SKB_DATA_SOFTIRQ);
3844 memcpy(skb_tail_pointer(skb), vaddr, length);
3845 kunmap_atomic(vaddr,
3846 KM_SKB_DATA_SOFTIRQ);
3847 /* re-use the page, so don't erase
3848 * buffer_info->page */
3849 skb_put(skb, length);
3851 skb_fill_page_desc(skb, 0,
3852 buffer_info->page, 0,
3854 e1000_consume_page(buffer_info, skb,
3860 /* Receive Checksum Offload XXX recompute due to CRC strip? */
3861 e1000_rx_checksum(adapter,
3863 ((u32)(rx_desc->errors) << 24),
3864 le16_to_cpu(rx_desc->csum), skb);
3866 pskb_trim(skb, skb->len - 4);
3868 /* probably a little skewed due to removing CRC */
3869 total_rx_bytes += skb->len;
3872 /* eth type trans needs skb->data to point to something */
3873 if (!pskb_may_pull(skb, ETH_HLEN)) {
3874 e_err(drv, "pskb_may_pull failed.\n");
3879 e1000_receive_skb(adapter, status, rx_desc->special, skb);
3882 rx_desc->status = 0;
3884 /* return some buffers to hardware, one at a time is too slow */
3885 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3886 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3890 /* use prefetched values */
3892 buffer_info = next_buffer;
3894 rx_ring->next_to_clean = i;
3896 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3898 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3900 adapter->total_rx_packets += total_rx_packets;
3901 adapter->total_rx_bytes += total_rx_bytes;
3902 netdev->stats.rx_bytes += total_rx_bytes;
3903 netdev->stats.rx_packets += total_rx_packets;
/* this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
3911 static void e1000_check_copybreak(struct net_device *netdev,
3912 struct e1000_buffer *buffer_info,
3913 u32 length, struct sk_buff **skb)
3915 struct sk_buff *new_skb;
3917 if (length > copybreak)
3920 new_skb = netdev_alloc_skb_ip_align(netdev, length);
3924 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
3925 (*skb)->data - NET_IP_ALIGN,
3926 length + NET_IP_ALIGN);
3927 /* save the skb in buffer_info as good */
3928 buffer_info->skb = *skb;
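/* Packets at or below copybreak bytes are copied into a fresh,
 * right-sized skb, and the original full-size receive buffer stays in
 * buffer_info so it can be recycled by the allocator instead of being
 * reallocated and remapped. */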
3933 * e1000_clean_rx_irq - Send received data up the network stack; legacy
3934 * @adapter: board private structure
3935 * @rx_ring: ring to clean
3936 * @work_done: amount of napi work completed this call
3937 * @work_to_do: max amount of work allowed for this call to do
3939 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3940 struct e1000_rx_ring *rx_ring,
3941 int *work_done, int work_to_do)
3943 struct e1000_hw *hw = &adapter->hw;
3944 struct net_device *netdev = adapter->netdev;
3945 struct pci_dev *pdev = adapter->pdev;
3946 struct e1000_rx_desc *rx_desc, *next_rxd;
3947 struct e1000_buffer *buffer_info, *next_buffer;
3948 unsigned long flags;
3951 int cleaned_count = 0;
3952 bool cleaned = false;
3953 unsigned int total_rx_bytes=0, total_rx_packets=0;
3955 i = rx_ring->next_to_clean;
3956 rx_desc = E1000_RX_DESC(*rx_ring, i);
3957 buffer_info = &rx_ring->buffer_info[i];
3959 while (rx_desc->status & E1000_RXD_STAT_DD) {
3960 struct sk_buff *skb;
3963 if (*work_done >= work_to_do)
3966 rmb(); /* read descriptor and rx_buffer_info after status DD */
3968 status = rx_desc->status;
3969 skb = buffer_info->skb;
3970 buffer_info->skb = NULL;
3972 prefetch(skb->data - NET_IP_ALIGN);
3974 if (++i == rx_ring->count) i = 0;
3975 next_rxd = E1000_RX_DESC(*rx_ring, i);
3978 next_buffer = &rx_ring->buffer_info[i];
3982 dma_unmap_single(&pdev->dev, buffer_info->dma,
3983 buffer_info->length, DMA_FROM_DEVICE);
3984 buffer_info->dma = 0;
3986 length = le16_to_cpu(rx_desc->length);
/* !EOP means multiple descriptors were used to store a single
 * packet, if that's the case we need to toss it. In fact, we
 * need to toss every packet with the EOP bit clear and the next
 * frame that _does_ have the EOP bit set, as it is by
 * definition only a frame fragment
 */
3993 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
3994 adapter->discarding = true;
3996 if (adapter->discarding) {
3997 /* All receives must fit into a single buffer */
3998 e_dbg("Receive packet consumed multiple buffers\n");
4000 buffer_info->skb = skb;
4001 if (status & E1000_RXD_STAT_EOP)
4002 adapter->discarding = false;
4006 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4007 u8 last_byte = *(skb->data + length - 1);
4008 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4010 spin_lock_irqsave(&adapter->stats_lock, flags);
4011 e1000_tbi_adjust_stats(hw, &adapter->stats,
4013 spin_unlock_irqrestore(&adapter->stats_lock,
4018 buffer_info->skb = skb;
4023 /* adjust length to remove Ethernet CRC, this must be
4024 * done after the TBI_ACCEPT workaround above */
4027 /* probably a little skewed due to removing CRC */
4028 total_rx_bytes += length;
4031 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4033 skb_put(skb, length);
4035 /* Receive Checksum Offload */
4036 e1000_rx_checksum(adapter,
4038 ((u32)(rx_desc->errors) << 24),
4039 le16_to_cpu(rx_desc->csum), skb);
4041 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4044 rx_desc->status = 0;
4046 /* return some buffers to hardware, one at a time is too slow */
4047 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4048 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4052 /* use prefetched values */
4054 buffer_info = next_buffer;
4056 rx_ring->next_to_clean = i;
4058 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4060 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4062 adapter->total_rx_packets += total_rx_packets;
4063 adapter->total_rx_bytes += total_rx_bytes;
4064 netdev->stats.rx_bytes += total_rx_bytes;
4065 netdev->stats.rx_packets += total_rx_packets;
4070 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4071 * @adapter: address of board private structure
4072 * @rx_ring: pointer to receive ring structure
4073 * @cleaned_count: number of buffers to allocate this pass
4077 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4078 struct e1000_rx_ring *rx_ring, int cleaned_count)
4080 struct net_device *netdev = adapter->netdev;
4081 struct pci_dev *pdev = adapter->pdev;
4082 struct e1000_rx_desc *rx_desc;
4083 struct e1000_buffer *buffer_info;
4084 struct sk_buff *skb;
4086 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
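/* The jumbo path only needs a small skb: the payload lands in the
 * page mapped below and is attached as a fragment, so this buffer
 * only has to hold whatever later gets pulled into the linear area
 * (headers and small copybreak copies). */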
4088 i = rx_ring->next_to_use;
4089 buffer_info = &rx_ring->buffer_info[i];
4091 while (cleaned_count--) {
4092 skb = buffer_info->skb;
4098 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4099 if (unlikely(!skb)) {
4100 /* Better luck next round */
4101 adapter->alloc_rx_buff_failed++;
4105 /* Fix for errata 23, can't cross 64kB boundary */
4106 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4107 struct sk_buff *oldskb = skb;
4108 e_err(rx_err, "skb align check failed: %u bytes at "
4109 "%p\n", bufsz, skb->data);
4110 /* Try again, without freeing the previous */
4111 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4112 /* Failed allocation, critical failure */
4114 dev_kfree_skb(oldskb);
4115 adapter->alloc_rx_buff_failed++;
4119 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4122 dev_kfree_skb(oldskb);
4123 break; /* while (cleaned_count--) */
4126 /* Use new allocation */
4127 dev_kfree_skb(oldskb);
4129 buffer_info->skb = skb;
4130 buffer_info->length = adapter->rx_buffer_len;
4132 /* allocate a new page if necessary */
4133 if (!buffer_info->page) {
4134 buffer_info->page = alloc_page(GFP_ATOMIC);
4135 if (unlikely(!buffer_info->page)) {
4136 adapter->alloc_rx_buff_failed++;
4141 if (!buffer_info->dma) {
4142 buffer_info->dma = dma_map_page(&pdev->dev,
4143 buffer_info->page, 0,
4144 buffer_info->length,
4146 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4147 put_page(buffer_info->page);
4149 buffer_info->page = NULL;
4150 buffer_info->skb = NULL;
4151 buffer_info->dma = 0;
4152 adapter->alloc_rx_buff_failed++;
4153 break; /* while !buffer_info->skb */
4157 rx_desc = E1000_RX_DESC(*rx_ring, i);
4158 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4160 if (unlikely(++i == rx_ring->count))
4162 buffer_info = &rx_ring->buffer_info[i];
4165 if (likely(rx_ring->next_to_use != i)) {
4166 rx_ring->next_to_use = i;
4167 if (unlikely(i-- == 0))
4168 i = (rx_ring->count - 1);
4170 /* Force memory writes to complete before letting h/w
4171 * know there are new descriptors to fetch. (Only
4172 * applicable for weak-ordered memory model archs,
4173 * such as IA-64). */
4175 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse any skb still attached to this ring entry */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/*
		 * XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: address of board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}

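/**
 * e1000_ioctl - handle device-specific ioctl requests
 * @netdev: network interface device structure
 * @ifr: pointer to the ioctl request
 * @cmd: ioctl command
 *
 * The MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are forwarded to
 * e1000_mii_ioctl(); everything else is rejected.
 **/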
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

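/**
 * e1000_mii_ioctl - read or write a PHY register over MII
 * @netdev: network interface device structure
 * @ifr: pointer to the ioctl request (carries a struct mii_ioctl_data)
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * Only copper media is supported. Writes to PHY_CTRL additionally update
 * the driver's speed/duplex/autoneg state and reinitialize the interface
 * so hardware and software stay in sync.
 **/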
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	u16 spddplx;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					if (mii_reg & 0x40)
						spddplx = SPEED_1000;
					else if (mii_reg & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (mii_reg & 0x100)
						   ? DUPLEX_FULL :
						   DUPLEX_HALF;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

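/* Thin wrappers used by the shared e1000_hw code to reach PCI configuration
 * space (MWI, PCI-X maximum memory read byte count) and x86 I/O ports
 * through the adapter's pci_dev.
 */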
void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;
	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

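/**
 * e1000_vlan_rx_register - enable or disable hardware VLAN support
 * @netdev: network interface device structure
 * @grp: VLAN group from the 8021q layer, or NULL to disable
 *
 * With a group present, VLAN tag stripping/insertion (CTRL.VME) and VLAN
 * receive filtering (RCTL.VFE) are enabled; with no group they are turned
 * off and the management VLAN, if any, is released.
 **/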
static void e1000_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_VME;
		ew32(CTRL, ctrl);

		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);

		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

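/* Set the VLAN filter-table bit for a VLAN ID so tagged frames are accepted. */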
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return;
	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

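/* Clear the VLAN filter-table bit for a VLAN ID and drop it from the group. */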
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);
}

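/* Reprogram the VLAN registers and filter table after a reset, replaying
 * every VLAN ID currently registered with the 8021q layer.
 */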
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			e1000_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

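/**
 * e1000_set_spd_dplx - force link speed and duplex
 * @adapter: address of board private structure
 * @spddplx: sum of a SPEED_* and a DUPLEX_* constant, e.g.
 *           SPEED_100 + DUPLEX_FULL as passed down from ethtool
 *
 * Returns 0 on success or -EINVAL for combinations the hardware cannot do
 * (anything but 1000/Full on fiber, and 1000/Half anywhere).
 **/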
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		e_err(probe, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		e_err(probe, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

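/* Common suspend/shutdown path: stop the interface, program the Wake Up
 * Filter Control register according to adapter->wol, and report through
 * *enable_wake whether PCI wake should be armed.
 */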
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
	int retval;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}

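/* Legacy PCI power-management entry point for system suspend. */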
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

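/* Restore PCI state, re-enable the device, and bring the interface back up
 * on resume from suspend.
 */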
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}

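/* Shutdown handler: also used for reboot/poweroff; wake-up is armed only
 * when the system is actually powering off.
 */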
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 **/
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 **/
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}