/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
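
/* Exporting the table via MODULE_DEVICE_TABLE lets userspace hotplug
 * match the module against PCI vendor/device IDs and autoload it when
 * one of the 82597EX variants is found on the bus. */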

/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static void ixgb_vlan_rx_register(struct net_device *netdev,
				  struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe = ixgb_probe,
	.remove = __devexit_p(ixgb_remove),
	.err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
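
/* Note: debug is a message-level shift count, not a bitmask;
 * netif_msg_init() in ixgb_probe() expands it to the mask
 * (1 << debug) - 1, so the default of 3 enables the DRV, PROBE
 * and LINK message classes. */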

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
	pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
	pr_info("%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
		  IXGB_INT_TXDW | IXGB_INT_LSC;
	if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&adapter->hw, IMS, val);
	IXGB_WRITE_FLUSH(&adapter->hw);
}
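
/* ixgb_up reloads everything a hardware reset wipes out: unicast and
 * multicast filtering, VLANs, both descriptor rings and the jumbo
 * frame size, then requests the IRQ (trying MSI first when the bus is
 * in PCI-X mode) before re-enabling NAPI and interrupts. */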
int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = 1;
			irq_flags = 0;
		}
		/* proceed to try to request regular interrupt */
	}

	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

	if ((hw->max_frame_size != max_frame) ||
	    (hw->max_frame_size !=
	     (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if (hw->max_frame_size >
		    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	clear_bit(__IXGB_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);
	ixgb_irq_enable(adapter);

	netif_wake_queue(netdev);

	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	/* prevent the interrupt handler from restarting watchdog */
	set_bit(__IXGB_DOWN, &adapter->flags);

	napi_disable(&adapter->napi);
	/* waiting for NAPI to complete can re-enable interrupts */
	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}
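
/* ixgb_reset stops the MAC and reruns hardware init; because a reset
 * clears the jumbo frame configuration, the MFS register and the
 * CTRL0 jumbo-enable bit are restored afterwards from the cached
 * max_frame_size. */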
void
ixgb_reset(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;

	ixgb_adapter_stop(hw);
	if (!ixgb_init_hw(hw))
		netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

	/* restore frame size information */
	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
	if (hw->max_frame_size >
	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
			ctrl0 |= IXGB_CTRL0_JFE;
			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
		}
	}
}

static const struct net_device_ops ixgb_netdev_ops = {
	.ndo_open = ixgb_open,
	.ndo_stop = ixgb_close,
	.ndo_start_xmit = ixgb_xmit_frame,
	.ndo_get_stats = ixgb_get_stats,
	.ndo_set_multicast_list = ixgb_set_multi,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = ixgb_set_mac,
	.ndo_change_mtu = ixgb_change_mtu,
	.ndo_tx_timeout = ixgb_tx_timeout,
	.ndo_vlan_rx_register = ixgb_vlan_rx_register,
	.ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ixgb_netpoll,
#endif
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	int pci_using_dac;
	int i;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				pr_err("No usable DMA configuration, aborting\n");
				goto err_dma_mask;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgb_driver_name);
	if (err)
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);

	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->netdev_ops = &ixgb_netdev_ops;
	ixgb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	err = ixgb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;
	netdev->features |= NETIF_F_TSO;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* make sure the EEPROM is good */

	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	netif_info(adapter, probe, adapter->netdev,
		   "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */

	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
	pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

	if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	set_bit(__IXGB_DOWN, &adapter->flags);
	return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = ixgb_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	netif_carrier_off(netdev);

	/* allocate receive descriptors */

	err = ixgb_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	err = ixgb_up(adapter);
	if (err)
		goto err_up;

	netif_start_queue(netdev);

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);

	return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, true);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate transmit descriptor ring memory\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		vfree(txdr->buffer_info);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate transmit descriptor memory\n");
		return -ENOMEM;
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u64 tdba = adapter->tx_ring.dma;
	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	u32 tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE |
		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate receive descriptor ring\n");
		return -ENOMEM;
	}

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		vfree(rxdr->buffer_info);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate receive descriptors\n");
		return -ENOMEM;
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	u32 rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	rctl |= IXGB_RCTL_SECRC;

	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	u64 rdba = adapter->rx_ring.dma;
	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	u32 rxcsum;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* due to the hardware errata with RXDCTL, we are unable to use any of
	 * the performance enhancing features of it without causing other
	 * subtle bugs, some of the bugs could include receive length
	 * corruption at high data rates (WTHRESH > 0) and/or receive
	 * descriptor ring irregularities (particularly in hardware cache) */
	IXGB_WRITE_REG(hw, RXDCTL, 0);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if (adapter->rx_csum) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
			  adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}

	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(&pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->length = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		/* disable VLAN filtering */
		rctl &= ~IXGB_RCTL_CFIEN;
		rctl &= ~IXGB_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IXGB_RCTL_MPE;
			rctl &= ~IXGB_RCTL_UPE;
		} else {
			rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		}
		/* enable VLAN filtering */
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
	}

	if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
		       IXGB_ETH_LENGTH_OF_ADDRESS];

		IXGB_WRITE_REG(hw, RCTL, rctl);

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
			       ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);

		ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
	}

	if (netdev->features & NETIF_F_HW_VLAN_RX)
		ixgb_vlan_strip_enable(adapter);
	else
		ixgb_vlan_strip_disable(adapter);
}

/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if (adapter->hw.link_up) {
		if (!netif_carrier_ok(netdev)) {
			netdev_info(netdev,
				    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
				    (adapter->hw.fc.type == ixgb_fc_full) ?
				    "RX/TX" :
				    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
				    "RX" :
				    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
				    "TX" : "None");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			netdev_info(netdev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if (!netif_carrier_ok(netdev)) {
		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM	0x00000001
#define IXGB_TX_FLAGS_VLAN	0x00000002
#define IXGB_TX_FLAGS_TSO	0x00000004
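
/* ixgb_tso builds a TSO context descriptor for a GSO skb: the IP total
 * length is zeroed, the TCP checksum is seeded with the pseudo-header
 * sum, and the header/payload offsets plus MSS are handed to the MAC
 * so it can segment in hardware. Returns 1 if a context descriptor was
 * queued, 0 for non-TSO frames, or a negative errno. */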
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	u16 ipcse, tucse, mss;
	int err;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;

		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						  IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}
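
/* ixgb_tx_csum programs a checksum-offload context descriptor for
 * CHECKSUM_PARTIAL frames, pointing the hardware at the checksum start
 * (tucss) and store (tucso) offsets taken from the skb. Returns true
 * when a descriptor was queued. */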
static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_checksum_start_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(u32 *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return true;
	}

	return false;
}

#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)
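
/* ixgb_tx_map DMA-maps the skb head and each page fragment, splitting
 * buffers larger than IXGB_MAX_DATA_PER_TXD across descriptors and
 * trimming 4 bytes from the final TSO buffer as the premature
 * write-back sentinel. Returns the number of descriptors used, or 0
 * after unwinding all mappings on a DMA error. */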
static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_buffer *buffer_info;
	int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		/* Workaround for premature desc write-backs
		 * in TSO mode. Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);

			/* Workaround for premature desc write-backs
			 * in TSO mode. Append 4-byte sentinel desc */
			if (unlikely(mss && (f == (nr_frags - 1))
				     && size == len && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma =
				dma_map_page(&pdev->dev, frag->page,
					     offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
		}
	}
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
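
/* ixgb_tx_queue turns the mapped buffers into hardware descriptors,
 * marks the last one EOP/RS so the MAC writes back completion status,
 * then issues a wmb() before bumping the tail register so the hardware
 * never sees the tail move ahead of the descriptor contents. */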
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	u32 cmd_type_len = adapter->tx_cmd_type;
	u8 status = 0;
	u8 popts = 0;
	unsigned int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN)
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if (++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |=
		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
			      struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1 /* one more needed for sentinel TSO workaround */
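
/* DESC_NEEDED is the worst case for a single frame: one descriptor per
 * IXGB_MAX_DATA_PER_TXD chunk of linear data, one per possible page
 * fragment, one context descriptor, and one sentinel for the TSO
 * write-back workaround above. */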

static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	int vlan_id = 0;
	int count = 0;
	int tso;

	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
					DESC_NEEDED)))
		return NETDEV_TX_BUSY;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = vlan_tx_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if (ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	count = ixgb_tx_map(adapter, skb, first);

	if (count) {
		ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
		/* Make sure there is space in the ring for the next send. */
		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		adapter->tx_ring.buffer_info[first].time_stamp = 0;
		adapter->tx_ring.next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
	ixgb_down(adapter, true);
	ixgb_up(adapter);
}

/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
	return &netdev->stats;
}

/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	/* MTU < 68 is an error for IPv4 traffic, just don't allow it */
	if ((new_mtu < 68) ||
	    (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Invalid MTU setting %d\n", new_mtu);
		return -EINVAL;
	}

	if (old_max_frame == max_frame)
		return 0;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */

	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgb_up(adapter);

	return 0;
}
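
/* The hardware statistics registers are presumed clear-on-read here
 * (as on other Intel MACs of this generation), which is why every
 * sample below is accumulated with += into adapter->stats rather than
 * assigned. */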
/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/

void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;

	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		if (multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */

	netdev->stats.rx_packets = adapter->stats.gprcl;
	netdev->stats.tx_packets = adapter->stats.gptcl;
	netdev->stats.rx_bytes = adapter->stats.gorcl;
	netdev->stats.tx_bytes = adapter->stats.gotcl;
	netdev->stats.multicast = adapter->stats.mprcl;
	netdev->stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	netdev->stats.rx_errors =
	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
	    adapter->stats.ruc +
	    adapter->stats.roc /*+ adapter->stats.rlec */ +
	    adapter->stats.icbc +
	    adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * netdev->stats.rx_length_errors = adapter->stats.rlec;
	 */

	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_fifo_errors = adapter->stats.mpc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;
	netdev->stats.rx_over_errors = adapter->stats.mpc;

	netdev->stats.tx_errors = 0;
	netdev->stats.rx_frame_errors = 0;
	netdev->stats.tx_aborted_errors = 0;
	netdev->stats.tx_carrier_errors = 0;
	netdev->stats.tx_fifo_errors = 0;
	netdev->stats.tx_heartbeat_errors = 0;
	netdev->stats.tx_window_errors = 0;
}

#define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/

static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	u32 icr = IXGB_READ_REG(hw, ICR);

	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies);

	if (napi_schedule_prep(&adapter->napi)) {

		/* Disable interrupts and register for poll. The flush
		 * of the posted write is intentionally left out. */

		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * ixgb_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/

static int
ixgb_clean(struct napi_struct *napi, int budget)
{
	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
	int work_done = 0;

	ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, budget);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			ixgb_irq_enable(adapter);
	}

	return work_done;
}

/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		rmb(); /* read buffer_info after eop_desc */
		for (cleaned = false; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts &
			    (IXGB_TX_DESC_POPTS_TXSM |
			     IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(u32 *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if (++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean. */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
		    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			 IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			netif_err(adapter, drv, adapter->netdev,
				  "Detected Tx Unit Hang\n"
				  "  TDH                  <%x>\n"
				  "  TDT                  <%x>\n"
				  "  next_to_use          <%x>\n"
				  "  next_to_clean        <%x>\n"
				  "buffer_info[next_to_clean]\n"
				  "  time_stamp           <%lx>\n"
				  "  next_to_watch        <%x>\n"
				  "  jiffies              <%lx>\n"
				  "  next_to_watch.status <%x>\n",
				  IXGB_READ_REG(&adapter->hw, TDH),
				  IXGB_READ_REG(&adapter->hw, TDT),
				  tx_ring->next_to_use,
				  tx_ring->next_to_clean,
				  tx_ring->buffer_info[eop].time_stamp,
				  eop,
				  jiffies,
				  eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}

/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @sk_buff: socket buffer with received data
 **/

static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc,
		 struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	    (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb_checksum_none_assert(skb);
		return;
	}

	/* At this point we know the hardware did the TCP checksum */
	/* now look at the TCP checksum error bit */
	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb_checksum_none_assert(skb);
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}

/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void ixgb_check_copybreak(struct net_device *netdev,
				 struct ixgb_buffer *buffer_info,
				 u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}

/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 **/

static bool
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	u32 length;
	unsigned int i, j;
	int cleaned_count = 0;
	bool cleaned = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		j = i + 1;
		if (j == rx_ring->count)
			j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 buffer_info->length,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		rx_desc->length = 0;

		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

			/* All receives must fit into a single buffer */

			IXGB_DBG("Receive packet consumed multiple buffers "
				 "length<%x>\n", length);

			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		if (unlikely(rx_desc->errors &
		    (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
		     IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		ixgb_check_copybreak(netdev, buffer_info, length, &skb);

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->special));
		} else {
			netif_receive_skb(skb);
		}

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
			ixgb_alloc_rx_buffers(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		ixgb_alloc_rx_buffers(adapter, cleaned_count);

	return cleaned;
}

/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);

	/* leave three descriptors unused */
	while (--cleancount > 2 && cleaned_count--) {
		/* recycle! it's good for you */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map;
		}

		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
		rx_desc->status = 0;

		if (++i == rx_ring->count) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs, such
		 * as IA-64). */
		wmb();
		IXGB_WRITE_REG(&adapter->hw, RDT, i);
	}
}

/**
 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 *
 * @param netdev network interface device structure
 * @param grp indicates to enable or disable tagging/stripping
 **/
static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	adapter->vlgrp = grp;
}

static void
ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
{
	u32 ctrl;

	/* enable VLAN tag insert/strip */
	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}

static void
ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
{
	u32 ctrl;

	/* disable VLAN tag insert/strip */
	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl &= ~IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}
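
/* The VLAN filter table (VFTA) is a bit array indexed by VLAN ID:
 * vid >> 5 selects the 32-bit table entry and vid & 0x1F the bit
 * within it, so the two helpers below just set or clear one bit. */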
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* add VID to filter table */

	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}

static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	ixgb_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	/* don't enable interrupts unless we are UP */
	if (adapter->netdev->flags & IFF_UP)
		ixgb_irq_enable(adapter);

	/* remove VID from filter table */

	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}
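
/* After a reset wipes the VFTA, ixgb_restore_vlan re-registers the
 * vlan_group and walks every possible VID, re-adding each one that
 * still has a device attached. */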
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void ixgb_netpoll(struct net_device *dev)
{
	struct ixgb_adapter *adapter = netdev_priv(dev);

	disable_irq(adapter->pdev->irq);
	ixgb_intr(adapter->pdev->irq, dev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * ixgb_io_error_detected() - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;

	pci_set_master(pdev);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, the EEPROM checksum is not valid\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, invalid MAC address\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgb_io_resume - called when it's OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it's OK to resume
 * normal operation. Implementation resembles the second-half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		if (ixgb_up(adapter)) {
			pr_err("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
	mod_timer(&adapter->watchdog_timer, jiffies);
}

/* ixgb_main.c */