/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#ifndef CONFIG_IXGB_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
#define DRV_VERSION "1.0.126-k2" DRIVERNAPI
char ixgb_driver_version[] = DRV_VERSION;
static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgb_pci_tbl[] = {
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
/* Local Function Prototypes */

int ixgb_up(struct ixgb_adapter *adapter);
void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
void ixgb_reset(struct ixgb_adapter *adapter);
int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
void ixgb_update_stats(struct ixgb_adapter *adapter);
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct net_device *netdev, int *budget);
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
				   int *work_done, int work_to_do);
#else
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
#endif
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
void ixgb_set_ethtool_ops(struct net_device *netdev);
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_rx_register(struct net_device *netdev,
				  struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);
/* Exported from other modules */
extern void ixgb_check_options(struct ixgb_adapter *adapter);

static struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};
static struct pci_driver ixgb_driver = {
	.name = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe = ixgb_probe,
	.remove = __devexit_p(ixgb_remove),
	.err_handler = &ixgb_err_handler
};
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
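/* A sketch of how the level maps, assuming the standard netif_msg_init()
 * semantics: the value is treated as a bit count, so debug=3 yields a
 * msg_enable of 0x7 (the low three NETIF_MSG_* bits set), and an
 * out-of-range value falls back to the default above. */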
/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_WTHRESH_DEFAULT 15  /* chip writes back at this many or RXT0 */
#define RXDCTL_PTHRESH_DEFAULT 0   /* chip considers prefetch below
				    * this */
#define RXDCTL_HTHRESH_DEFAULT 0   /* chip will only prefetch if tail
				    * is pushed this many descriptors
				    * ahead */
/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init
ixgb_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n",
	       ixgb_driver_string, ixgb_driver_version);

	printk(KERN_INFO "%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);
/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);
/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}
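/* Note that irq_sem makes disable/enable nest: every ixgb_irq_disable()
 * bumps the count, and ixgb_irq_enable() below only unmasks interrupts
 * once the count drops back to zero. */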
/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	if(atomic_dec_and_test(&adapter->irq_sem)) {
		IXGB_WRITE_REG(&adapter->hw, IMS,
			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
			       IXGB_INT_LSC);
		IXGB_WRITE_FLUSH(&adapter->hw);
	}
}
int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter);

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

#ifdef CONFIG_PCI_MSI
	{
	boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
			  IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
	adapter->have_msi = TRUE;

	if (!pcix)
		adapter->have_msi = FALSE;
	else if((err = pci_enable_msi(adapter->pdev))) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate MSI interrupt Error: %d\n", err);
		adapter->have_msi = FALSE;
		/* proceed to try to request regular interrupt */
	}
	}

#endif
	if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
			      IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			      netdev->name, netdev))) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate interrupt Error: %d\n", err);
		return err;
	}
	if((hw->max_frame_size != max_frame) ||
	   (hw->max_frame_size !=
	    (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if(hw->max_frame_size >
		   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if(!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}
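	/* Worked example, assuming the conventional 14-byte Ethernet header
	 * (ENET_HEADER_SIZE) and 4-byte FCS (ENET_FCS_LENGTH): an MTU of
	 * 1500 gives max_frame = 1518, which fits a standard frame, so JFE
	 * stays clear; an MTU of 9000 gives 9018 and enables jumbo mode. */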
	mod_timer(&adapter->watchdog_timer, jiffies);

#ifdef CONFIG_IXGB_NAPI
	netif_poll_enable(netdev);
#endif
	ixgb_irq_enable(adapter);

	return 0;
}
void
ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
	if(adapter->have_msi == TRUE)
		pci_disable_msi(adapter->pdev);
#endif
	if(kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);
#ifdef CONFIG_IXGB_NAPI
	netif_poll_disable(netdev);
#endif
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}
void
ixgb_reset(struct ixgb_adapter *adapter)
{
	ixgb_adapter_stop(&adapter->hw);
	if(!ixgb_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
}
/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit
ixgb_probe(struct pci_dev *pdev,
	   const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	unsigned long mmio_start;
	int mmio_len;
	int pci_using_dac;
	int i;
	int err;

	if((err = pci_enable_device(pdev)))
		return err;

	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
	   !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
		   (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
			printk(KERN_ERR
			       "ixgb: No usable DMA configuration, aborting\n");
			goto err_dma_mask;
		}
		pci_using_dac = 0;
	}

	if((err = pci_request_regions(pdev, ixgb_driver_name)))
		goto err_request_regions;

	pci_set_master(pdev);
	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if(!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if(!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for(i = BAR_1; i <= BAR_5; i++) {
		if(pci_resource_len(pdev, i) == 0)
			continue;
		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}
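	/* the loop above walks BAR_1..BAR_5 and records the first BAR flagged
	 * as I/O space in hw.io_base; register access itself goes through the
	 * memory-mapped hw_addr set up above, and io_base is exported later
	 * as netdev->base_addr */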
	netdev->open = &ixgb_open;
	netdev->stop = &ixgb_close;
	netdev->hard_start_xmit = &ixgb_xmit_frame;
	netdev->get_stats = &ixgb_get_stats;
	netdev->set_multicast_list = &ixgb_set_multi;
	netdev->set_mac_address = &ixgb_set_mac;
	netdev->change_mtu = &ixgb_change_mtu;
	ixgb_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgb_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#ifdef CONFIG_IXGB_NAPI
	netdev->poll = &ixgb_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = ixgb_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgb_netpoll;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	/* setup the private structure */

	if((err = ixgb_sw_init(adapter)))
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;
	netdev->features |= NETIF_F_TSO;
#ifdef NETIF_F_LLTX
	netdev->features |= NETIF_F_LLTX;
#endif

	if(pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
	/* make sure the EEPROM is good */

	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if(!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev)))
		goto err_register;

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */
	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
}
/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size;

	if((hw->device_id == IXGB_DEVICE_ID_82597EX)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		DPRINTK(PROBE, ERR, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->tx_lock);

	return 0;
}
/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */

	if((err = ixgb_setup_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */

	if((err = ixgb_setup_rx_resources(adapter)))
		goto err_setup_rx;

	if((err = ixgb_up(adapter)))
		goto err_up;

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);

	return err;
}
/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, TRUE);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}
/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if(!txdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate transmit descriptor ring memory\n");
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if(!txdr->desc) {
		vfree(txdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate transmit descriptor memory\n");
		return -ENOMEM;
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	uint64_t tdba = adapter->tx_ring.dma;
	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	uint32_t tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE
		| (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if(!rxdr->buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate receive descriptor ring\n");
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if(!rxdr->desc) {
		vfree(rxdr->buffer_info);
		DPRINTK(PROBE, ERR,
			"Unable to allocate receive descriptors\n");
		return -ENOMEM;
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}
/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: Board private structure
 **/
static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	uint32_t rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	rctl |= IXGB_RCTL_SECRC;

	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;
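	/* Example: a 1500-byte MTU makes rx_buffer_len 1518 bytes, which
	 * lands in the 2048-byte bucket above; a 9000-byte MTU (9018 bytes)
	 * falls through to BSIZE_16384. */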
	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	uint64_t rdba = adapter->rx_ring.dma;
	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t rxdctl;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* set up pre-fetching of receive buffers so we get some before we
	 * run out (default hardware behavior is to run out before fetching
	 * more). This sets up to fetch if HTHRESH rx descriptors are avail
	 * and the descriptors in hw cache are below PTHRESH. This avoids
	 * the hardware behavior of fetching <=512 descriptors in a single
	 * burst that pre-empts all other activity, usually causing fifo
	 * overflows. */
	/* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
	rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
		 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
		 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(hw, RXDCTL, rxdctl);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if(adapter->rx_csum == TRUE) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}
/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	pci_free_consistent(pdev, adapter->tx_ring.size,
			    adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}
static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	struct pci_dev *pdev = adapter->pdev;

	if (buffer_info->dma)
		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
			       PCI_DMA_TODEVICE);

	if (buffer_info->skb)
		dev_kfree_skb_any(buffer_info->skb);

	buffer_info->skb = NULL;
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}
/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/
static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for(i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}
/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/
static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for(i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if(buffer_info->skb) {

			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(buffer_info->skb);

			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}
/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if(!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}
/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	uint32_t rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if(netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if(netdev->flags & IFF_ALLMULTI) {
		rctl |= IXGB_RCTL_MPE;
		rctl &= ~IXGB_RCTL_UPE;
	} else {
		rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}

	if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];

		IXGB_WRITE_REG(hw, RCTL, rctl);

		for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
		    i++, mc_ptr = mc_ptr->next)
			memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
			       mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);

		ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
	}
}
/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 **/
static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if(adapter->hw.link_up) {
		if(!netif_carrier_ok(netdev)) {
			DPRINTK(LINK, INFO,
				"NIC Link is Up 10000 Mbps Full Duplex\n");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if(netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if(!netif_carrier_ok(netdev)) {
		if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = TRUE;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
#define IXGB_TX_FLAGS_CSUM		0x00000001
#define IXGB_TX_FLAGS_VLAN		0x00000002
#define IXGB_TX_FLAGS_TSO		0x00000004
static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
	uint16_t ipcse, tucse, mss;
	int err;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;

		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
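		/* For a typical untagged IPv4/TCP frame with 20-byte IP and
		 * TCP headers: ipcss = 14, ipcse = 33, tucss = 34, and
		 * hdr_len = 34 + 20 = 54 bytes of headers replicated per
		 * segment. */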
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						  IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}
static boolean_t
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t css, cso;

	if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_transport_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(uint32_t *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}

	return FALSE;
}
#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)
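/* 1 << 14 = 16384 bytes, the most a single Tx data descriptor can carry */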
static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;

	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while(len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		/* Workaround for premature desc write-backs
		 * in TSO mode. Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size,
				       PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if(++i == tx_ring->count) i = 0;
	}

	for(f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = 0;

		while(len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);

			/* Workaround for premature desc write-backs
			 * in TSO mode. Append 4-byte sentinel desc */
			if (unlikely(mss && (f == (nr_frags - 1)) && size == len
				     && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     frag->page_offset + offset,
					     size,
					     PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
			if(++i == tx_ring->count) i = 0;
		}
	}
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}
static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	uint32_t cmd_type_len = adapter->tx_cmd_type;
	uint8_t status = 0;
	uint8_t popts = 0;
	unsigned int i;

	if(tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if(tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if(tx_flags & IXGB_TX_FLAGS_VLAN) {
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}

	i = tx_ring->next_to_use;

	while(count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if(++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
					     | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}
static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
			      struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1 /* one more needed for sentinel TSO workaround */
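/* Worked example for TXD_USE_COUNT: a 20000-byte region spans
 * (20000 >> 14) = 1 full descriptor plus a 3616-byte remainder, so it
 * needs 2 descriptors. DESC_NEEDED then budgets the worst case: one
 * full descriptor for skb->data, one per page for every possible
 * fragment, plus the context and sentinel descriptors. */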
static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	unsigned long flags;
	int vlan_id = 0;
	int tso;

	if(skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return 0;
	}

#ifdef NETIF_F_LLTX
	local_irq_save(flags);
	if (!spin_trylock(&adapter->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
#else
	spin_lock_irqsave(&adapter->tx_lock, flags);
#endif

	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
					DESC_NEEDED))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

#ifndef NETIF_F_LLTX
	spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif

	if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = vlan_tx_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
#ifdef NETIF_F_LLTX
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if(ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
		      tx_flags);

	netdev->trans_start = jiffies;

#ifdef NETIF_F_LLTX
	/* Make sure there is space in the ring for the next send. */
	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);

	spin_unlock_irqrestore(&adapter->tx_lock, flags);

#endif
	return NETDEV_TX_OK;
}
/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void
ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}
static void
ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
	ixgb_down(adapter, TRUE);
	ixgb_up(adapter);
}
/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	return &adapter->net_stats;
}
/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
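	/* e.g. new_mtu = 9000 gives max_frame = 9000 + 14 + 4 = 9018 bytes,
	 * which the range check below measures against the minimum legal
	 * Ethernet frame and the jumbo maximum */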
	if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
	   || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
		return -EINVAL;
	}

	adapter->rx_buffer_len = max_frame;

	netdev->mtu = new_mtu;

	if ((old_max_frame != max_frame) && netif_running(netdev)) {
		ixgb_down(adapter, TRUE);
		ixgb_up(adapter);
	}

	return 0;
}
/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;

	if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	   (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		if (multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
	/* Fill out the OS statistics structure */

	adapter->net_stats.rx_packets = adapter->stats.gprcl;
	adapter->net_stats.tx_packets = adapter->stats.gptcl;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprcl;
	adapter->net_stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	adapter->net_stats.rx_errors =
	/* adapter->stats.rnbc + */ adapter->stats.crcerrs +
		adapter->stats.ruc +
		adapter->stats.roc /*+ adapter->stats.rlec */ +
		adapter->stats.icbc +
		adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 */

	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
	adapter->net_stats.rx_over_errors = adapter->stats.mpc;

	adapter->net_stats.tx_errors = 0;
	adapter->net_stats.rx_frame_errors = 0;
	adapter->net_stats.tx_aborted_errors = 0;
	adapter->net_stats.tx_carrier_errors = 0;
	adapter->net_stats.tx_fifo_errors = 0;
	adapter->net_stats.tx_heartbeat_errors = 0;
	adapter->net_stats.tx_window_errors = 0;
}
#define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t
ixgb_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	uint32_t icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
	unsigned int i;
#endif

	if(unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
		mod_timer(&adapter->watchdog_timer, jiffies);
	}

#ifdef CONFIG_IXGB_NAPI
	if(netif_rx_schedule_prep(netdev)) {

		/* Disable interrupts and register for poll. The flush
		   of the posted write is intentionally left out.
		*/

		atomic_inc(&adapter->irq_sem);
		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__netif_rx_schedule(netdev);
	}
#else
	/* yes, that is actually a & and it is meant to make sure that
	 * every pass through this for loop checks both receive and
	 * transmit queues for completed descriptors, intended to
	 * avoid starvation issues and assist tx/rx fairness. */
	for(i = 0; i < IXGB_MAX_INTR; i++)
		if(!ixgb_clean_rx_irq(adapter) &
		   !ixgb_clean_tx_irq(adapter))
			break;
#endif
	return IRQ_HANDLED;
}
#ifdef CONFIG_IXGB_NAPI
/**
 * ixgb_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/
static int
ixgb_clean(struct net_device *netdev, int *budget)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int work_to_do = min(*budget, netdev->quota);
	int tx_cleaned;
	int work_done = 0;

	tx_cleaned = ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, work_to_do);

	*budget -= work_done;
	netdev->quota -= work_done;

	/* if no Tx and not enough Rx work done, exit the polling mode */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev);
		ixgb_irq_enable(adapter);
		return 0;
	}

	return 1;
}
#endif
/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static boolean_t
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	boolean_t cleaned = FALSE;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);
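	/* next_to_watch holds the index of the last (EOP) descriptor of the
	 * packet that begins at this entry; a set DD status bit there means
	 * the hardware has finished with the entire packet. */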
	while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		for(cleaned = FALSE; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts
			    & (IXGB_TX_DESC_POPTS_TXSM |
			       IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(uint32_t *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if(++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(netif_queue_stopped(netdev))) {
		spin_lock(&adapter->tx_lock);
		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
		    (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
			netif_wake_queue(netdev);
		spin_unlock(&adapter->tx_lock);
	}
	if(adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = FALSE;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
		    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			 IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  next_to_watch.status <%x>\n",
				IXGB_READ_REG(&adapter->hw, TDH),
				IXGB_READ_REG(&adapter->hw, TDT),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}
/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @sk_buff: socket buffer with received data
 **/
static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc,
		 struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	   (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* At this point we know the hardware did the TCP checksum */
	/* now look at the TCP checksum error bit */
	if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb->ip_summed = CHECKSUM_NONE;
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}
/**
 * ixgb_clean_rx_irq - Send received data up the network stack,
 * @adapter: board private structure
 **/
static boolean_t
#ifdef CONFIG_IXGB_NAPI
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	uint32_t length;
	unsigned int i, j;
	boolean_t cleaned = FALSE;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];
	while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb, *next_skb;
		u8 status;

#ifdef CONFIG_IXGB_NAPI
		if(*work_done >= work_to_do)
			break;

		(*work_done)++;
#endif
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data);

		if(++i == rx_ring->count) i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		if((j = i + 1) == rx_ring->count) j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];
		next_skb = next_buffer->skb;
		prefetch(next_skb);

		cleaned = TRUE;

		pci_unmap_single(pdev,
				 buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		length = le16_to_cpu(rx_desc->length);
		if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

			/* All receives must fit into a single buffer */

			IXGB_DBG("Receive packet consumed multiple buffers "
				 "length<%x>\n", length);

			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		if (unlikely(rx_desc->errors
			     & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
				| IXGB_RX_DESC_ERRORS_P |
				IXGB_RX_DESC_ERRORS_RXE))) {

			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack */
#define IXGB_CB_LENGTH 256
		if (length < IXGB_CB_LENGTH) {
			struct sk_buff *new_skb =
				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
		}
		/* end copybreak code */
		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->special) &
						 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_IXGB_NAPI */
		if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->special) &
					IXGB_RX_DESC_SPECIAL_VLAN_MASK);
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_IXGB_NAPI */
		netdev->last_rx = jiffies;

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	ixgb_alloc_rx_buffers(adapter);

	return cleaned;
}
/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/
static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int num_group_tail_writes;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);

	num_group_tail_writes = IXGB_RX_BUFFER_WRITE;

	/* leave three descriptors unused */
	while(--cleancount > 2) {
		/* recycle! its good for you */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
				       + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = pci_map_single(pdev,
						  skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
		rx_desc->status = 0;

		if(++i == rx_ring->count) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs, such
		 * as IA-64). */
		wmb();
		IXGB_WRITE_REG(&adapter->hw, RDT, i);
	}
}
/**
 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 *
 * @param netdev network interface device structure
 * @param grp indicates to enable or disable tagging/stripping
 **/
static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	uint32_t ctrl, rctl;

	ixgb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if(grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl |= IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* enable VLAN receive filtering */

		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */

		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl &= ~IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* disable VLAN filtering */

		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl &= ~IXGB_RCTL_VFE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	}

	ixgb_irq_enable(adapter);
}
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	/* add VID to filter table */

	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
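	/* e.g. vid 100: index = (100 >> 5) & 0x7F = 3, bit = 100 & 0x1F = 4,
	 * so bit 4 of the fourth 32-bit VFTA register is set */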
	ixgb_write_vfta(&adapter->hw, index, vfta);
}
static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	uint32_t vfta, index;

	ixgb_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	ixgb_irq_enable(adapter);

	/* remove VID from filter table */

	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if(adapter->vlgrp) {
		uint16_t vid;
		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if(!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgb_netpoll(struct net_device *dev)
{
	struct ixgb_adapter *adapter = netdev_priv(dev);

	disable_irq(adapter->pdev->irq);
	ixgb_intr(adapter->pdev->irq, dev);
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * ixgb_io_error_detected() - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if(netif_running(netdev))
		ixgb_down(adapter, TRUE);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first-half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if(pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;

	pci_set_master(pdev);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if(!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgb_io_resume - called when it's OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it's OK to resume
 * normal operation. Implementation resembles the second-half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	pci_set_master(pdev);

	if(netif_running(netdev)) {
		if(ixgb_up(adapter)) {
			printk("ixgb: can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
	mod_timer(&adapter->watchdog_timer, jiffies);
}