[linux.git] drivers/net/ethernet/intel/e1000/e1000_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 #include "e1000.h"
5 #include <net/ip6_checksum.h>
6 #include <linux/io.h>
7 #include <linux/prefetch.h>
8 #include <linux/bitops.h>
9 #include <linux/if_vlan.h>
10
11 char e1000_driver_name[] = "e1000";
12 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
13 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
14
15 /* e1000_pci_tbl - PCI Device ID Table
16  *
17  * Last entry must be all 0s
18  *
19  * Macro expands to...
20  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
21  */
22 static const struct pci_device_id e1000_pci_tbl[] = {
23         INTEL_E1000_ETHERNET_DEVICE(0x1000),
24         INTEL_E1000_ETHERNET_DEVICE(0x1001),
25         INTEL_E1000_ETHERNET_DEVICE(0x1004),
26         INTEL_E1000_ETHERNET_DEVICE(0x1008),
27         INTEL_E1000_ETHERNET_DEVICE(0x1009),
28         INTEL_E1000_ETHERNET_DEVICE(0x100C),
29         INTEL_E1000_ETHERNET_DEVICE(0x100D),
30         INTEL_E1000_ETHERNET_DEVICE(0x100E),
31         INTEL_E1000_ETHERNET_DEVICE(0x100F),
32         INTEL_E1000_ETHERNET_DEVICE(0x1010),
33         INTEL_E1000_ETHERNET_DEVICE(0x1011),
34         INTEL_E1000_ETHERNET_DEVICE(0x1012),
35         INTEL_E1000_ETHERNET_DEVICE(0x1013),
36         INTEL_E1000_ETHERNET_DEVICE(0x1014),
37         INTEL_E1000_ETHERNET_DEVICE(0x1015),
38         INTEL_E1000_ETHERNET_DEVICE(0x1016),
39         INTEL_E1000_ETHERNET_DEVICE(0x1017),
40         INTEL_E1000_ETHERNET_DEVICE(0x1018),
41         INTEL_E1000_ETHERNET_DEVICE(0x1019),
42         INTEL_E1000_ETHERNET_DEVICE(0x101A),
43         INTEL_E1000_ETHERNET_DEVICE(0x101D),
44         INTEL_E1000_ETHERNET_DEVICE(0x101E),
45         INTEL_E1000_ETHERNET_DEVICE(0x1026),
46         INTEL_E1000_ETHERNET_DEVICE(0x1027),
47         INTEL_E1000_ETHERNET_DEVICE(0x1028),
48         INTEL_E1000_ETHERNET_DEVICE(0x1075),
49         INTEL_E1000_ETHERNET_DEVICE(0x1076),
50         INTEL_E1000_ETHERNET_DEVICE(0x1077),
51         INTEL_E1000_ETHERNET_DEVICE(0x1078),
52         INTEL_E1000_ETHERNET_DEVICE(0x1079),
53         INTEL_E1000_ETHERNET_DEVICE(0x107A),
54         INTEL_E1000_ETHERNET_DEVICE(0x107B),
55         INTEL_E1000_ETHERNET_DEVICE(0x107C),
56         INTEL_E1000_ETHERNET_DEVICE(0x108A),
57         INTEL_E1000_ETHERNET_DEVICE(0x1099),
58         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
59         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
60         /* required last entry */
61         {0,}
62 };
63
64 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
65
66 int e1000_up(struct e1000_adapter *adapter);
67 void e1000_down(struct e1000_adapter *adapter);
68 void e1000_reinit_locked(struct e1000_adapter *adapter);
69 void e1000_reset(struct e1000_adapter *adapter);
70 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
71 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
72 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
73 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
74 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
75                                     struct e1000_tx_ring *txdr);
76 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
77                                     struct e1000_rx_ring *rxdr);
78 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
79                                     struct e1000_tx_ring *tx_ring);
80 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
81                                     struct e1000_rx_ring *rx_ring);
82 void e1000_update_stats(struct e1000_adapter *adapter);
83
84 static int e1000_init_module(void);
85 static void e1000_exit_module(void);
86 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
87 static void e1000_remove(struct pci_dev *pdev);
88 static int e1000_alloc_queues(struct e1000_adapter *adapter);
89 static int e1000_sw_init(struct e1000_adapter *adapter);
90 int e1000_open(struct net_device *netdev);
91 int e1000_close(struct net_device *netdev);
92 static void e1000_configure_tx(struct e1000_adapter *adapter);
93 static void e1000_configure_rx(struct e1000_adapter *adapter);
94 static void e1000_setup_rctl(struct e1000_adapter *adapter);
95 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
96 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
97 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
98                                 struct e1000_tx_ring *tx_ring);
99 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
100                                 struct e1000_rx_ring *rx_ring);
101 static void e1000_set_rx_mode(struct net_device *netdev);
102 static void e1000_update_phy_info_task(struct work_struct *work);
103 static void e1000_watchdog(struct work_struct *work);
104 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
105 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
106                                     struct net_device *netdev);
107 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
108 static int e1000_set_mac(struct net_device *netdev, void *p);
109 static irqreturn_t e1000_intr(int irq, void *data);
110 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
111                                struct e1000_tx_ring *tx_ring);
112 static int e1000_clean(struct napi_struct *napi, int budget);
113 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
114                                struct e1000_rx_ring *rx_ring,
115                                int *work_done, int work_to_do);
116 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
117                                      struct e1000_rx_ring *rx_ring,
118                                      int *work_done, int work_to_do);
119 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
120                                          struct e1000_rx_ring *rx_ring,
121                                          int cleaned_count)
122 {
123 }
124 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
125                                    struct e1000_rx_ring *rx_ring,
126                                    int cleaned_count);
127 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
128                                          struct e1000_rx_ring *rx_ring,
129                                          int cleaned_count);
130 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
131 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
132                            int cmd);
133 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
134 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
135 static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
136 static void e1000_reset_task(struct work_struct *work);
137 static void e1000_smartspeed(struct e1000_adapter *adapter);
138 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
139                                        struct sk_buff *skb);
140
141 static bool e1000_vlan_used(struct e1000_adapter *adapter);
142 static void e1000_vlan_mode(struct net_device *netdev,
143                             netdev_features_t features);
144 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
145                                      bool filter_on);
146 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
147                                  __be16 proto, u16 vid);
148 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
149                                   __be16 proto, u16 vid);
150 static void e1000_restore_vlan(struct e1000_adapter *adapter);
151
152 static int __maybe_unused e1000_suspend(struct device *dev);
153 static int __maybe_unused e1000_resume(struct device *dev);
154 static void e1000_shutdown(struct pci_dev *pdev);
155
156 #ifdef CONFIG_NET_POLL_CONTROLLER
157 /* for netdump / net console */
158 static void e1000_netpoll(struct net_device *netdev);
159 #endif
160
161 #define COPYBREAK_DEFAULT 256
162 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
163 module_param(copybreak, uint, 0644);
164 MODULE_PARM_DESC(copybreak,
165         "Maximum size of packet that is copied to a new buffer on receive");
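/* Received packets no larger than 'copybreak' bytes are copied into a
 * small, freshly allocated skb rather than handed up in the full Rx
 * buffer (see the MODULE_PARM_DESC above).  For example, loading the
 * module with "modprobe e1000 copybreak=128" lowers the cutoff, and
 * because the parameter is registered with 0644 permissions it can also
 * be changed at run time via /sys/module/e1000/parameters/copybreak.
 */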
166
167 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
168                                                 pci_channel_state_t state);
169 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
170 static void e1000_io_resume(struct pci_dev *pdev);
171
172 static const struct pci_error_handlers e1000_err_handler = {
173         .error_detected = e1000_io_error_detected,
174         .slot_reset = e1000_io_slot_reset,
175         .resume = e1000_io_resume,
176 };
177
178 static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
179
180 static struct pci_driver e1000_driver = {
181         .name     = e1000_driver_name,
182         .id_table = e1000_pci_tbl,
183         .probe    = e1000_probe,
184         .remove   = e1000_remove,
185         .driver = {
186                 .pm = &e1000_pm_ops,
187         },
188         .shutdown = e1000_shutdown,
189         .err_handler = &e1000_err_handler
190 };
191
192 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
193 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
194 MODULE_LICENSE("GPL v2");
195
196 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
197 static int debug = -1;
198 module_param(debug, int, 0);
199 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
200
201 /**
202  * e1000_get_hw_dev - helper function for getting netdev
203  * @hw: pointer to HW struct
204  *
205  * Return the netdev used by the hardware layer to print debugging information
206  *
207  **/
208 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
209 {
210         struct e1000_adapter *adapter = hw->back;
211         return adapter->netdev;
212 }
213
214 /**
215  * e1000_init_module - Driver Registration Routine
216  *
217  * e1000_init_module is the first routine called when the driver is
218  * loaded. All it does is register with the PCI subsystem.
219  **/
220 static int __init e1000_init_module(void)
221 {
222         int ret;
223         pr_info("%s\n", e1000_driver_string);
224
225         pr_info("%s\n", e1000_copyright);
226
227         ret = pci_register_driver(&e1000_driver);
228         if (copybreak != COPYBREAK_DEFAULT) {
229                 if (copybreak == 0)
230                         pr_info("copybreak disabled\n");
231                 else
232                         pr_info("copybreak enabled for packets <= %u bytes\n",
233                                 copybreak);
234         }
235         return ret;
236 }
237
238 module_init(e1000_init_module);
239
240 /**
241  * e1000_exit_module - Driver Exit Cleanup Routine
242  *
243  * e1000_exit_module is called just before the driver is removed
244  * from memory.
245  **/
246 static void __exit e1000_exit_module(void)
247 {
248         pci_unregister_driver(&e1000_driver);
249 }
250
251 module_exit(e1000_exit_module);
252
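/**
 * e1000_request_irq - request the interrupt line for the adapter
 * @adapter: board private structure
 *
 * The e1000 uses a single legacy (INTx) interrupt on pdev->irq,
 * requested as shared since the line may be shared with other devices.
 **/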
253 static int e1000_request_irq(struct e1000_adapter *adapter)
254 {
255         struct net_device *netdev = adapter->netdev;
256         irq_handler_t handler = e1000_intr;
257         int irq_flags = IRQF_SHARED;
258         int err;
259
260         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
261                           netdev);
262         if (err)
263                 e_err(probe, "Unable to allocate interrupt, Error: %d\n",
264                       err);
265
266         return err;
267 }
268
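/**
 * e1000_free_irq - release the interrupt line held by the adapter
 * @adapter: board private structure
 **/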
269 static void e1000_free_irq(struct e1000_adapter *adapter)
270 {
271         struct net_device *netdev = adapter->netdev;
272
273         free_irq(adapter->pdev->irq, netdev);
274 }
275
276 /**
277  * e1000_irq_disable - Mask off interrupt generation on the NIC
278  * @adapter: board private structure
279  **/
280 static void e1000_irq_disable(struct e1000_adapter *adapter)
281 {
282         struct e1000_hw *hw = &adapter->hw;
283
284         ew32(IMC, ~0);
285         E1000_WRITE_FLUSH();
286         synchronize_irq(adapter->pdev->irq);
287 }
288
289 /**
290  * e1000_irq_enable - Enable default interrupt generation settings
291  * @adapter: board private structure
292  **/
293 static void e1000_irq_enable(struct e1000_adapter *adapter)
294 {
295         struct e1000_hw *hw = &adapter->hw;
296
297         ew32(IMS, IMS_ENABLE_MASK);
298         E1000_WRITE_FLUSH();
299 }
300
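/**
 * e1000_update_mng_vlan - keep the manageability VLAN id in sync
 * @adapter: board private structure
 *
 * If the management/DHCP cookie reports a VLAN id that is not already
 * active, register it (and drop the previously tracked id once it is no
 * longer in use) so manageability traffic keeps flowing.
 **/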
301 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
302 {
303         struct e1000_hw *hw = &adapter->hw;
304         struct net_device *netdev = adapter->netdev;
305         u16 vid = hw->mng_cookie.vlan_id;
306         u16 old_vid = adapter->mng_vlan_id;
307
308         if (!e1000_vlan_used(adapter))
309                 return;
310
311         if (!test_bit(vid, adapter->active_vlans)) {
312                 if (hw->mng_cookie.status &
313                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
314                         e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
315                         adapter->mng_vlan_id = vid;
316                 } else {
317                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
318                 }
319                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
320                     (vid != old_vid) &&
321                     !test_bit(old_vid, adapter->active_vlans))
322                         e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
323                                                old_vid);
324         } else {
325                 adapter->mng_vlan_id = vid;
326         }
327 }
328
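/**
 * e1000_init_manageability - hand ARP handling back to the OS
 * @adapter: board private structure
 *
 * When management pass-through is enabled, clear MANC.ARP_EN so the
 * firmware no longer intercepts ARP packets while the driver is active.
 **/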
329 static void e1000_init_manageability(struct e1000_adapter *adapter)
330 {
331         struct e1000_hw *hw = &adapter->hw;
332
333         if (adapter->en_mng_pt) {
334                 u32 manc = er32(MANC);
335
336                 /* disable hardware interception of ARP */
337                 manc &= ~(E1000_MANC_ARP_EN);
338
339                 ew32(MANC, manc);
340         }
341 }
342
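/**
 * e1000_release_manageability - return ARP handling to the firmware
 * @adapter: board private structure
 *
 * Counterpart of e1000_init_manageability(): re-enable MANC.ARP_EN so
 * management firmware can intercept ARP again once the driver lets go.
 **/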
343 static void e1000_release_manageability(struct e1000_adapter *adapter)
344 {
345         struct e1000_hw *hw = &adapter->hw;
346
347         if (adapter->en_mng_pt) {
348                 u32 manc = er32(MANC);
349
350                 /* re-enable hardware interception of ARP */
351                 manc |= E1000_MANC_ARP_EN;
352
353                 ew32(MANC, manc);
354         }
355 }
356
357 /**
358  * e1000_configure - configure the hardware for RX and TX
359  * @adapter: private board structure
360  **/
361 static void e1000_configure(struct e1000_adapter *adapter)
362 {
363         struct net_device *netdev = adapter->netdev;
364         int i;
365
366         e1000_set_rx_mode(netdev);
367
368         e1000_restore_vlan(adapter);
369         e1000_init_manageability(adapter);
370
371         e1000_configure_tx(adapter);
372         e1000_setup_rctl(adapter);
373         e1000_configure_rx(adapter);
374         /* call E1000_DESC_UNUSED which always leaves
375          * at least 1 descriptor unused to make sure
376          * next_to_use != next_to_clean
377          */
378         for (i = 0; i < adapter->num_rx_queues; i++) {
379                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
380                 adapter->alloc_rx_buf(adapter, ring,
381                                       E1000_DESC_UNUSED(ring));
382         }
383 }
384
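/**
 * e1000_up - bring the interface back up after a reset
 * @adapter: board private structure
 *
 * Reprograms the hardware, re-enables NAPI and interrupts, wakes the
 * transmit queue, and fires a link-status-change interrupt so the
 * watchdog picks up the current link state.
 **/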
385 int e1000_up(struct e1000_adapter *adapter)
386 {
387         struct e1000_hw *hw = &adapter->hw;
388
389         /* hardware has been reset, we need to reload some things */
390         e1000_configure(adapter);
391
392         clear_bit(__E1000_DOWN, &adapter->flags);
393
394         napi_enable(&adapter->napi);
395
396         e1000_irq_enable(adapter);
397
398         netif_wake_queue(adapter->netdev);
399
400         /* fire a link change interrupt to start the watchdog */
401         ew32(ICS, E1000_ICS_LSC);
402         return 0;
403 }
404
405 /**
406  * e1000_power_up_phy - restore link in case the phy was powered down
407  * @adapter: address of board private structure
408  *
409  * The phy may be powered down to save power and turn off link when the
410  * driver is unloaded and wake on lan is not enabled (among others)
411  * *** this routine MUST be followed by a call to e1000_reset ***
412  **/
413 void e1000_power_up_phy(struct e1000_adapter *adapter)
414 {
415         struct e1000_hw *hw = &adapter->hw;
416         u16 mii_reg = 0;
417
418         /* Just clear the power down bit to wake the phy back up */
419         if (hw->media_type == e1000_media_type_copper) {
420                 /* according to the manual, the phy will retain its
421                  * settings across a power-down/up cycle
422                  */
423                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
424                 mii_reg &= ~MII_CR_POWER_DOWN;
425                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
426         }
427 }
428
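/**
 * e1000_power_down_phy - power down the PHY when it is safe to do so
 * @adapter: board private structure
 **/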
429 static void e1000_power_down_phy(struct e1000_adapter *adapter)
430 {
431         struct e1000_hw *hw = &adapter->hw;
432
433         /* Power down the PHY so no link is implied when interface is down.
434          * The PHY cannot be powered down if any of the following is true:
435          * (a) WoL is enabled
436          * (b) AMT is active
437          * (c) SoL/IDER session is active
438          */
439         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
440            hw->media_type == e1000_media_type_copper) {
441                 u16 mii_reg = 0;
442
443                 switch (hw->mac_type) {
444                 case e1000_82540:
445                 case e1000_82545:
446                 case e1000_82545_rev_3:
447                 case e1000_82546:
448                 case e1000_ce4100:
449                 case e1000_82546_rev_3:
450                 case e1000_82541:
451                 case e1000_82541_rev_2:
452                 case e1000_82547:
453                 case e1000_82547_rev_2:
454                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
455                                 goto out;
456                         break;
457                 default:
458                         goto out;
459                 }
460                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
461                 mii_reg |= MII_CR_POWER_DOWN;
462                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
463                 msleep(1);
464         }
465 out:
466         return;
467 }
468
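/**
 * e1000_down_and_stop - mark the adapter down and stop deferred work
 * @adapter: board private structure
 *
 * Sets __E1000_DOWN and cancels the watchdog, phy-info, fifo-stall and
 * (unless a reset is in progress) reset tasks.
 **/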
469 static void e1000_down_and_stop(struct e1000_adapter *adapter)
470 {
471         set_bit(__E1000_DOWN, &adapter->flags);
472
473         cancel_delayed_work_sync(&adapter->watchdog_task);
474
475         /*
476          * Since the watchdog task can reschedule other tasks, we should cancel
477          * it first, otherwise we can run into the situation when a work is
478          * still running after the adapter has been turned down.
479          */
480
481         cancel_delayed_work_sync(&adapter->phy_info_task);
482         cancel_delayed_work_sync(&adapter->fifo_stall_task);
483
484         /* Only kill reset task if adapter is not resetting */
485         if (!test_bit(__E1000_RESETTING, &adapter->flags))
486                 cancel_work_sync(&adapter->reset_task);
487 }
488
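/**
 * e1000_down - quiesce the hardware and software when the interface goes down
 * @adapter: board private structure
 *
 * Disables receives and transmits in the hardware, stops the transmit
 * queue, turns the carrier off, disables NAPI and interrupts, cancels
 * deferred work, then resets the chip and reclaims the Tx/Rx rings.
 **/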
489 void e1000_down(struct e1000_adapter *adapter)
490 {
491         struct e1000_hw *hw = &adapter->hw;
492         struct net_device *netdev = adapter->netdev;
493         u32 rctl, tctl;
494
495         /* disable receives in the hardware */
496         rctl = er32(RCTL);
497         ew32(RCTL, rctl & ~E1000_RCTL_EN);
498         /* flush and sleep below */
499
500         netif_tx_disable(netdev);
501
502         /* disable transmits in the hardware */
503         tctl = er32(TCTL);
504         tctl &= ~E1000_TCTL_EN;
505         ew32(TCTL, tctl);
506         /* flush both disables and wait for them to finish */
507         E1000_WRITE_FLUSH();
508         msleep(10);
509
510         /* Set the carrier off after transmits have been disabled in the
511          * hardware, to avoid race conditions with e1000_watchdog() (which
512          * may be running concurrently to us, checking for the carrier
513          * bit to decide whether it should enable transmits again). Such
514          * a race condition would result into transmission being disabled
515          * in the hardware until the next IFF_DOWN+IFF_UP cycle.
516          */
517         netif_carrier_off(netdev);
518
519         napi_disable(&adapter->napi);
520
521         e1000_irq_disable(adapter);
522
523         /* Setting DOWN must be after irq_disable to prevent
524          * a screaming interrupt.  Setting DOWN also prevents
525          * tasks from rescheduling.
526          */
527         e1000_down_and_stop(adapter);
528
529         adapter->link_speed = 0;
530         adapter->link_duplex = 0;
531
532         e1000_reset(adapter);
533         e1000_clean_all_tx_rings(adapter);
534         e1000_clean_all_rx_rings(adapter);
535 }
536
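/**
 * e1000_reinit_locked - bring the interface down and back up under __E1000_RESETTING
 * @adapter: board private structure
 **/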
537 void e1000_reinit_locked(struct e1000_adapter *adapter)
538 {
539         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
540                 msleep(1);
541
542         /* only run the task if not already down */
543         if (!test_bit(__E1000_DOWN, &adapter->flags)) {
544                 e1000_down(adapter);
545                 e1000_up(adapter);
546         }
547
548         clear_bit(__E1000_RESETTING, &adapter->flags);
549 }
550
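/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer allocation (PBA), recalculates flow
 * control watermarks, resets and re-initializes the MAC, and restores
 * VLAN and manageability related settings.
 **/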
551 void e1000_reset(struct e1000_adapter *adapter)
552 {
553         struct e1000_hw *hw = &adapter->hw;
554         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
555         bool legacy_pba_adjust = false;
556         u16 hwm;
557
558         /* Repartition Pba for greater than 9k mtu
559          * To take effect CTRL.RST is required.
560          */
561
562         switch (hw->mac_type) {
563         case e1000_82542_rev2_0:
564         case e1000_82542_rev2_1:
565         case e1000_82543:
566         case e1000_82544:
567         case e1000_82540:
568         case e1000_82541:
569         case e1000_82541_rev_2:
570                 legacy_pba_adjust = true;
571                 pba = E1000_PBA_48K;
572                 break;
573         case e1000_82545:
574         case e1000_82545_rev_3:
575         case e1000_82546:
576         case e1000_ce4100:
577         case e1000_82546_rev_3:
578                 pba = E1000_PBA_48K;
579                 break;
580         case e1000_82547:
581         case e1000_82547_rev_2:
582                 legacy_pba_adjust = true;
583                 pba = E1000_PBA_30K;
584                 break;
585         case e1000_undefined:
586         case e1000_num_macs:
587                 break;
588         }
589
590         if (legacy_pba_adjust) {
591                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
592                         pba -= 8; /* allocate more FIFO for Tx */
593
594                 if (hw->mac_type == e1000_82547) {
595                         adapter->tx_fifo_head = 0;
596                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
597                         adapter->tx_fifo_size =
598                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
599                         atomic_set(&adapter->tx_fifo_stall, 0);
600                 }
601         } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
602                 /* adjust PBA for jumbo frames */
603                 ew32(PBA, pba);
604
605                 /* To maintain wire speed transmits, the Tx FIFO should be
606                  * large enough to accommodate two full transmit packets,
607                  * rounded up to the next 1KB and expressed in KB.  Likewise,
608                  * the Rx FIFO should be large enough to accommodate at least
609                  * one full receive packet and is similarly rounded up and
610                  * expressed in KB.
611                  */
612                 pba = er32(PBA);
613                 /* upper 16 bits has Tx packet buffer allocation size in KB */
614                 tx_space = pba >> 16;
615                 /* lower 16 bits has Rx packet buffer allocation size in KB */
616                 pba &= 0xffff;
617                 /* the Tx fifo also stores 16 bytes of information about the Tx
618                  * but don't include ethernet FCS because hardware appends it
619                  */
620                 min_tx_space = (hw->max_frame_size +
621                                 sizeof(struct e1000_tx_desc) -
622                                 ETH_FCS_LEN) * 2;
623                 min_tx_space = ALIGN(min_tx_space, 1024);
624                 min_tx_space >>= 10;
625                 /* software strips receive CRC, so leave room for it */
626                 min_rx_space = hw->max_frame_size;
627                 min_rx_space = ALIGN(min_rx_space, 1024);
628                 min_rx_space >>= 10;
629
630                 /* If current Tx allocation is less than the min Tx FIFO size,
631                  * and the min Tx FIFO size is less than the current Rx FIFO
632                  * allocation, take space away from current Rx allocation
633                  */
634                 if (tx_space < min_tx_space &&
635                     ((min_tx_space - tx_space) < pba)) {
636                         pba = pba - (min_tx_space - tx_space);
637
638                         /* PCI/PCIx hardware has PBA alignment constraints */
639                         switch (hw->mac_type) {
640                         case e1000_82545 ... e1000_82546_rev_3:
641                                 pba &= ~(E1000_PBA_8K - 1);
642                                 break;
643                         default:
644                                 break;
645                         }
646
647                         /* if short on Rx space, Rx wins and must trump Tx
648                          * adjustment or use Early Receive if available
649                          */
650                         if (pba < min_rx_space)
651                                 pba = min_rx_space;
652                 }
653         }
654
655         ew32(PBA, pba);
656
657         /* flow control settings:
658          * The high water mark must be low enough to fit one full frame
659          * (or the size used for early receive) above it in the Rx FIFO.
660          * Set it to the lower of:
661          * - 90% of the Rx FIFO size, and
662          * - the full Rx FIFO size minus the early receive size (for parts
663          *   with ERT support assuming ERT set to E1000_ERT_2048), or
664          * - the full Rx FIFO size minus one full frame
665          */
666         hwm = min(((pba << 10) * 9 / 10),
667                   ((pba << 10) - hw->max_frame_size));
668
669         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
670         hw->fc_low_water = hw->fc_high_water - 8;
671         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
672         hw->fc_send_xon = 1;
673         hw->fc = hw->original_fc;
674
675         /* Allow time for pending master requests to run */
676         e1000_reset_hw(hw);
677         if (hw->mac_type >= e1000_82544)
678                 ew32(WUC, 0);
679
680         if (e1000_init_hw(hw))
681                 e_dev_err("Hardware Error\n");
682         e1000_update_mng_vlan(adapter);
683
684         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
685         if (hw->mac_type >= e1000_82544 &&
686             hw->autoneg == 1 &&
687             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
688                 u32 ctrl = er32(CTRL);
689                 /* clear phy power management bit if we are in gig only mode,
690                  * which if enabled will attempt negotiation to 100Mb, which
691                  * can cause a loss of link at power off or driver unload
692                  */
693                 ctrl &= ~E1000_CTRL_SWDPIN3;
694                 ew32(CTRL, ctrl);
695         }
696
697         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
698         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
699
700         e1000_reset_adaptive(hw);
701         e1000_phy_get_info(hw, &adapter->phy_info);
702
703         e1000_release_manageability(adapter);
704 }
705
706 /* Dump the eeprom for users having checksum issues */
707 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
708 {
709         struct net_device *netdev = adapter->netdev;
710         struct ethtool_eeprom eeprom;
711         const struct ethtool_ops *ops = netdev->ethtool_ops;
712         u8 *data;
713         int i;
714         u16 csum_old, csum_new = 0;
715
716         eeprom.len = ops->get_eeprom_len(netdev);
717         eeprom.offset = 0;
718
719         data = kmalloc(eeprom.len, GFP_KERNEL);
720         if (!data)
721                 return;
722
723         ops->get_eeprom(netdev, &eeprom, data);
724
725         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
726                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
727         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
728                 csum_new += data[i] + (data[i + 1] << 8);
729         csum_new = EEPROM_SUM - csum_new;
730
731         pr_err("/*********************/\n");
732         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
733         pr_err("Calculated              : 0x%04x\n", csum_new);
734
735         pr_err("Offset    Values\n");
736         pr_err("========  ======\n");
737         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
738
739         pr_err("Include this output when contacting your support provider.\n");
740         pr_err("This is not a software error! Something bad happened to\n");
741         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
742         pr_err("result in further problems, possibly loss of data,\n");
743         pr_err("corruption or system hangs!\n");
744         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
745         pr_err("which is invalid and requires you to set the proper MAC\n");
746         pr_err("address manually before continuing to enable this network\n");
747         pr_err("device. Please inspect the EEPROM dump and report the\n");
748         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
749         pr_err("/*********************/\n");
750
751         kfree(data);
752 }
753
754 /**
755  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
756  * @pdev: PCI device information struct
757  *
758  * Return true if an adapter needs ioport resources
759  **/
760 static int e1000_is_need_ioport(struct pci_dev *pdev)
761 {
762         switch (pdev->device) {
763         case E1000_DEV_ID_82540EM:
764         case E1000_DEV_ID_82540EM_LOM:
765         case E1000_DEV_ID_82540EP:
766         case E1000_DEV_ID_82540EP_LOM:
767         case E1000_DEV_ID_82540EP_LP:
768         case E1000_DEV_ID_82541EI:
769         case E1000_DEV_ID_82541EI_MOBILE:
770         case E1000_DEV_ID_82541ER:
771         case E1000_DEV_ID_82541ER_LOM:
772         case E1000_DEV_ID_82541GI:
773         case E1000_DEV_ID_82541GI_LF:
774         case E1000_DEV_ID_82541GI_MOBILE:
775         case E1000_DEV_ID_82544EI_COPPER:
776         case E1000_DEV_ID_82544EI_FIBER:
777         case E1000_DEV_ID_82544GC_COPPER:
778         case E1000_DEV_ID_82544GC_LOM:
779         case E1000_DEV_ID_82545EM_COPPER:
780         case E1000_DEV_ID_82545EM_FIBER:
781         case E1000_DEV_ID_82546EB_COPPER:
782         case E1000_DEV_ID_82546EB_FIBER:
783         case E1000_DEV_ID_82546EB_QUAD_COPPER:
784                 return true;
785         default:
786                 return false;
787         }
788 }
789
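/**
 * e1000_fix_features - keep the Rx and Tx VLAN offload flags consistent
 * @netdev: network interface device structure
 * @features: feature set proposed by the network stack
 **/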
790 static netdev_features_t e1000_fix_features(struct net_device *netdev,
791         netdev_features_t features)
792 {
793         /* Since there is no support for separate Rx/Tx vlan accel
794          * enable/disable make sure Tx flag is always in same state as Rx.
795          */
796         if (features & NETIF_F_HW_VLAN_CTAG_RX)
797                 features |= NETIF_F_HW_VLAN_CTAG_TX;
798         else
799                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
800
801         return features;
802 }
803
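/**
 * e1000_set_features - apply a changed feature set to the adapter
 * @netdev: network interface device structure
 * @features: new feature set
 *
 * VLAN acceleration changes take effect immediately; RXCSUM/RXALL
 * changes require reinitializing (or at least resetting) the adapter.
 **/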
804 static int e1000_set_features(struct net_device *netdev,
805         netdev_features_t features)
806 {
807         struct e1000_adapter *adapter = netdev_priv(netdev);
808         netdev_features_t changed = features ^ netdev->features;
809
810         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
811                 e1000_vlan_mode(netdev, features);
812
813         if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
814                 return 0;
815
816         netdev->features = features;
817         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
818
819         if (netif_running(netdev))
820                 e1000_reinit_locked(adapter);
821         else
822                 e1000_reset(adapter);
823
824         return 1;
825 }
826
827 static const struct net_device_ops e1000_netdev_ops = {
828         .ndo_open               = e1000_open,
829         .ndo_stop               = e1000_close,
830         .ndo_start_xmit         = e1000_xmit_frame,
831         .ndo_set_rx_mode        = e1000_set_rx_mode,
832         .ndo_set_mac_address    = e1000_set_mac,
833         .ndo_tx_timeout         = e1000_tx_timeout,
834         .ndo_change_mtu         = e1000_change_mtu,
835         .ndo_do_ioctl           = e1000_ioctl,
836         .ndo_validate_addr      = eth_validate_addr,
837         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
838         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
839 #ifdef CONFIG_NET_POLL_CONTROLLER
840         .ndo_poll_controller    = e1000_netpoll,
841 #endif
842         .ndo_fix_features       = e1000_fix_features,
843         .ndo_set_features       = e1000_set_features,
844 };
845
846 /**
847  * e1000_init_hw_struct - initialize members of hw struct
848  * @adapter: board private struct
849  * @hw: structure used by e1000_hw.c
850  *
851  * Factors out initialization of the e1000_hw struct to its own function
852  * that can be called very early at init (just after struct allocation).
853  * Fields are initialized based on PCI device information and
854  * OS network device settings (MTU size).
855  * Returns negative error codes if MAC type setup fails.
856  */
857 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
858                                 struct e1000_hw *hw)
859 {
860         struct pci_dev *pdev = adapter->pdev;
861
862         /* PCI config space info */
863         hw->vendor_id = pdev->vendor;
864         hw->device_id = pdev->device;
865         hw->subsystem_vendor_id = pdev->subsystem_vendor;
866         hw->subsystem_id = pdev->subsystem_device;
867         hw->revision_id = pdev->revision;
868
869         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
870
871         hw->max_frame_size = adapter->netdev->mtu +
872                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
873         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
874
875         /* identify the MAC */
876         if (e1000_set_mac_type(hw)) {
877                 e_err(probe, "Unknown MAC Type\n");
878                 return -EIO;
879         }
880
881         switch (hw->mac_type) {
882         default:
883                 break;
884         case e1000_82541:
885         case e1000_82547:
886         case e1000_82541_rev_2:
887         case e1000_82547_rev_2:
888                 hw->phy_init_script = 1;
889                 break;
890         }
891
892         e1000_set_media_type(hw);
893         e1000_get_bus_info(hw);
894
895         hw->wait_autoneg_complete = false;
896         hw->tbi_compatibility_en = true;
897         hw->adaptive_ifs = true;
898
899         /* Copper options */
900
901         if (hw->media_type == e1000_media_type_copper) {
902                 hw->mdix = AUTO_ALL_MODES;
903                 hw->disable_polarity_correction = false;
904                 hw->master_slave = E1000_MASTER_SLAVE;
905         }
906
907         return 0;
908 }
909
910 /**
911  * e1000_probe - Device Initialization Routine
912  * @pdev: PCI device information struct
913  * @ent: entry in e1000_pci_tbl
914  *
915  * Returns 0 on success, negative on failure
916  *
917  * e1000_probe initializes an adapter identified by a pci_dev structure.
918  * The OS initialization, configuring of the adapter private structure,
919  * and a hardware reset occur.
920  **/
921 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
922 {
923         struct net_device *netdev;
924         struct e1000_adapter *adapter = NULL;
925         struct e1000_hw *hw;
926
927         static int cards_found;
928         static int global_quad_port_a; /* global ksp3 port a indication */
929         int i, err, pci_using_dac;
930         u16 eeprom_data = 0;
931         u16 tmp = 0;
932         u16 eeprom_apme_mask = E1000_EEPROM_APME;
933         int bars, need_ioport;
934         bool disable_dev = false;
935
936         /* do not allocate ioport bars when not needed */
937         need_ioport = e1000_is_need_ioport(pdev);
938         if (need_ioport) {
939                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
940                 err = pci_enable_device(pdev);
941         } else {
942                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
943                 err = pci_enable_device_mem(pdev);
944         }
945         if (err)
946                 return err;
947
948         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
949         if (err)
950                 goto err_pci_reg;
951
952         pci_set_master(pdev);
953         err = pci_save_state(pdev);
954         if (err)
955                 goto err_alloc_etherdev;
956
957         err = -ENOMEM;
958         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
959         if (!netdev)
960                 goto err_alloc_etherdev;
961
962         SET_NETDEV_DEV(netdev, &pdev->dev);
963
964         pci_set_drvdata(pdev, netdev);
965         adapter = netdev_priv(netdev);
966         adapter->netdev = netdev;
967         adapter->pdev = pdev;
968         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
969         adapter->bars = bars;
970         adapter->need_ioport = need_ioport;
971
972         hw = &adapter->hw;
973         hw->back = adapter;
974
975         err = -EIO;
976         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
977         if (!hw->hw_addr)
978                 goto err_ioremap;
979
980         if (adapter->need_ioport) {
981                 for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
982                         if (pci_resource_len(pdev, i) == 0)
983                                 continue;
984                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
985                                 hw->io_base = pci_resource_start(pdev, i);
986                                 break;
987                         }
988                 }
989         }
990
991         /* make ready for any if (hw->...) below */
992         err = e1000_init_hw_struct(adapter, hw);
993         if (err)
994                 goto err_sw_init;
995
996         /* there is a workaround being applied below that limits
997          * 64-bit DMA addresses to 64-bit hardware.  There are some
998          * 32-bit adapters whose Tx hangs when given 64-bit DMA addresses
999          */
1000         pci_using_dac = 0;
1001         if ((hw->bus_type == e1000_bus_type_pcix) &&
1002             !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1003                 pci_using_dac = 1;
1004         } else {
1005                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1006                 if (err) {
1007                         pr_err("No usable DMA config, aborting\n");
1008                         goto err_dma;
1009                 }
1010         }
1011
1012         netdev->netdev_ops = &e1000_netdev_ops;
1013         e1000_set_ethtool_ops(netdev);
1014         netdev->watchdog_timeo = 5 * HZ;
1015         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1016
1017         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1018
1019         adapter->bd_number = cards_found;
1020
1021         /* setup the private structure */
1022
1023         err = e1000_sw_init(adapter);
1024         if (err)
1025                 goto err_sw_init;
1026
1027         err = -EIO;
1028         if (hw->mac_type == e1000_ce4100) {
1029                 hw->ce4100_gbe_mdio_base_virt =
1030                                         ioremap(pci_resource_start(pdev, BAR_1),
1031                                                 pci_resource_len(pdev, BAR_1));
1032
1033                 if (!hw->ce4100_gbe_mdio_base_virt)
1034                         goto err_mdio_ioremap;
1035         }
1036
1037         if (hw->mac_type >= e1000_82543) {
1038                 netdev->hw_features = NETIF_F_SG |
1039                                    NETIF_F_HW_CSUM |
1040                                    NETIF_F_HW_VLAN_CTAG_RX;
1041                 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1042                                    NETIF_F_HW_VLAN_CTAG_FILTER;
1043         }
1044
1045         if ((hw->mac_type >= e1000_82544) &&
1046            (hw->mac_type != e1000_82547))
1047                 netdev->hw_features |= NETIF_F_TSO;
1048
1049         netdev->priv_flags |= IFF_SUPP_NOFCS;
1050
1051         netdev->features |= netdev->hw_features;
1052         netdev->hw_features |= (NETIF_F_RXCSUM |
1053                                 NETIF_F_RXALL |
1054                                 NETIF_F_RXFCS);
1055
1056         if (pci_using_dac) {
1057                 netdev->features |= NETIF_F_HIGHDMA;
1058                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1059         }
1060
1061         netdev->vlan_features |= (NETIF_F_TSO |
1062                                   NETIF_F_HW_CSUM |
1063                                   NETIF_F_SG);
1064
1065         /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1066         if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1067             hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1068                 netdev->priv_flags |= IFF_UNICAST_FLT;
1069
1070         /* MTU range: 46 - 16110 */
1071         netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1072         netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1073
1074         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1075
1076         /* initialize eeprom parameters */
1077         if (e1000_init_eeprom_params(hw)) {
1078                 e_err(probe, "EEPROM initialization failed\n");
1079                 goto err_eeprom;
1080         }
1081
1082         /* before reading the EEPROM, reset the controller to
1083          * put the device in a known good starting state
1084          */
1085
1086         e1000_reset_hw(hw);
1087
1088         /* make sure the EEPROM is good */
1089         if (e1000_validate_eeprom_checksum(hw) < 0) {
1090                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1091                 e1000_dump_eeprom(adapter);
1092                 /* set MAC address to all zeroes to invalidate and temporarily
1093                  * disable this device for the user. This blocks regular
1094                  * traffic while still permitting ethtool ioctls from reaching
1095                  * the hardware as well as allowing the user to run the
1096                  * interface after manually setting a hw addr using
1097                  * `ip link set address`
1098                  */
1099                 memset(hw->mac_addr, 0, netdev->addr_len);
1100         } else {
1101                 /* copy the MAC address out of the EEPROM */
1102                 if (e1000_read_mac_addr(hw))
1103                         e_err(probe, "EEPROM Read Error\n");
1104         }
1105         /* don't block initialization here due to bad MAC address */
1106         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1107
1108         if (!is_valid_ether_addr(netdev->dev_addr))
1109                 e_err(probe, "Invalid MAC Address\n");
1110
1111
1112         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1113         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1114                           e1000_82547_tx_fifo_stall_task);
1115         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1116         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1117
1118         e1000_check_options(adapter);
1119
1120         /* Initial Wake on LAN setting
1121          * If APM wake is enabled in the EEPROM,
1122          * enable the ACPI Magic Packet filter
1123          */
1124
1125         switch (hw->mac_type) {
1126         case e1000_82542_rev2_0:
1127         case e1000_82542_rev2_1:
1128         case e1000_82543:
1129                 break;
1130         case e1000_82544:
1131                 e1000_read_eeprom(hw,
1132                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1133                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1134                 break;
1135         case e1000_82546:
1136         case e1000_82546_rev_3:
1137                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1138                         e1000_read_eeprom(hw,
1139                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1140                         break;
1141                 }
1142                 fallthrough;
1143         default:
1144                 e1000_read_eeprom(hw,
1145                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1146                 break;
1147         }
1148         if (eeprom_data & eeprom_apme_mask)
1149                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1150
1151         /* now that we have the eeprom settings, apply the special cases
1152          * where the eeprom may be wrong or the board simply won't support
1153          * wake on lan on a particular port
1154          */
1155         switch (pdev->device) {
1156         case E1000_DEV_ID_82546GB_PCIE:
1157                 adapter->eeprom_wol = 0;
1158                 break;
1159         case E1000_DEV_ID_82546EB_FIBER:
1160         case E1000_DEV_ID_82546GB_FIBER:
1161                 /* Wake events only supported on port A for dual fiber
1162                  * regardless of eeprom setting
1163                  */
1164                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1165                         adapter->eeprom_wol = 0;
1166                 break;
1167         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1168                 /* if quad port adapter, disable WoL on all but port A */
1169                 if (global_quad_port_a != 0)
1170                         adapter->eeprom_wol = 0;
1171                 else
1172                         adapter->quad_port_a = true;
1173                 /* Reset for multiple quad port adapters */
1174                 if (++global_quad_port_a == 4)
1175                         global_quad_port_a = 0;
1176                 break;
1177         }
1178
1179         /* initialize the wol settings based on the eeprom settings */
1180         adapter->wol = adapter->eeprom_wol;
1181         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1182
1183         /* Auto detect PHY address */
1184         if (hw->mac_type == e1000_ce4100) {
1185                 for (i = 0; i < 32; i++) {
1186                         hw->phy_addr = i;
1187                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1188
1189                         if (tmp != 0 && tmp != 0xFF)
1190                                 break;
1191                 }
1192
1193                 if (i >= 32)
1194                         goto err_eeprom;
1195         }
1196
1197         /* reset the hardware with the new settings */
1198         e1000_reset(adapter);
1199
1200         strcpy(netdev->name, "eth%d");
1201         err = register_netdev(netdev);
1202         if (err)
1203                 goto err_register;
1204
1205         e1000_vlan_filter_on_off(adapter, false);
1206
1207         /* print bus type/speed/width info */
1208         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1209                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1210                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1211                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1212                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1213                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1214                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1215                netdev->dev_addr);
1216
1217         /* carrier off reporting is important to ethtool even BEFORE open */
1218         netif_carrier_off(netdev);
1219
1220         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1221
1222         cards_found++;
1223         return 0;
1224
1225 err_register:
1226 err_eeprom:
1227         e1000_phy_hw_reset(hw);
1228
1229         if (hw->flash_address)
1230                 iounmap(hw->flash_address);
1231         kfree(adapter->tx_ring);
1232         kfree(adapter->rx_ring);
1233 err_dma:
1234 err_sw_init:
1235 err_mdio_ioremap:
1236         iounmap(hw->ce4100_gbe_mdio_base_virt);
1237         iounmap(hw->hw_addr);
1238 err_ioremap:
1239         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1240         free_netdev(netdev);
1241 err_alloc_etherdev:
1242         pci_release_selected_regions(pdev, bars);
1243 err_pci_reg:
1244         if (!adapter || disable_dev)
1245                 pci_disable_device(pdev);
1246         return err;
1247 }
1248
1249 /**
1250  * e1000_remove - Device Removal Routine
1251  * @pdev: PCI device information struct
1252  *
1253  * e1000_remove is called by the PCI subsystem to alert the driver
1254  * that it should release a PCI device. That could be caused by a
1255  * Hot-Plug event, or because the driver is going to be removed from
1256  * memory.
1257  **/
1258 static void e1000_remove(struct pci_dev *pdev)
1259 {
1260         struct net_device *netdev = pci_get_drvdata(pdev);
1261         struct e1000_adapter *adapter = netdev_priv(netdev);
1262         struct e1000_hw *hw = &adapter->hw;
1263         bool disable_dev;
1264
1265         e1000_down_and_stop(adapter);
1266         e1000_release_manageability(adapter);
1267
1268         unregister_netdev(netdev);
1269
1270         e1000_phy_hw_reset(hw);
1271
1272         kfree(adapter->tx_ring);
1273         kfree(adapter->rx_ring);
1274
1275         if (hw->mac_type == e1000_ce4100)
1276                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1277         iounmap(hw->hw_addr);
1278         if (hw->flash_address)
1279                 iounmap(hw->flash_address);
1280         pci_release_selected_regions(pdev, adapter->bars);
1281
1282         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1283         free_netdev(netdev);
1284
1285         if (disable_dev)
1286                 pci_disable_device(pdev);
1287 }
1288
1289 /**
1290  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1291  * @adapter: board private structure to initialize
1292  *
1293  * e1000_sw_init initializes the Adapter private data structure.
1294  * e1000_init_hw_struct MUST be called before this function
1295  **/
1296 static int e1000_sw_init(struct e1000_adapter *adapter)
1297 {
1298         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1299
1300         adapter->num_tx_queues = 1;
1301         adapter->num_rx_queues = 1;
1302
1303         if (e1000_alloc_queues(adapter)) {
1304                 e_err(probe, "Unable to allocate memory for queues\n");
1305                 return -ENOMEM;
1306         }
1307
1308         /* Explicitly disable IRQ since the NIC can be in any state. */
1309         e1000_irq_disable(adapter);
1310
1311         spin_lock_init(&adapter->stats_lock);
1312
1313         set_bit(__E1000_DOWN, &adapter->flags);
1314
1315         return 0;
1316 }
1317
1318 /**
1319  * e1000_alloc_queues - Allocate memory for all rings
1320  * @adapter: board private structure to initialize
1321  *
1322  * We allocate one ring per queue at run-time since we don't know the
1323  * number of queues at compile-time.
1324  **/
1325 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1326 {
1327         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1328                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1329         if (!adapter->tx_ring)
1330                 return -ENOMEM;
1331
1332         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1333                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1334         if (!adapter->rx_ring) {
1335                 kfree(adapter->tx_ring);
1336                 return -ENOMEM;
1337         }
1338
1339         return E1000_SUCCESS;
1340 }
1341
1342 /**
1343  * e1000_open - Called when a network interface is made active
1344  * @netdev: network interface device structure
1345  *
1346  * Returns 0 on success, negative value on failure
1347  *
1348  * The open entry point is called when a network interface is made
1349  * active by the system (IFF_UP).  At this point all resources needed
1350  * for transmit and receive operations are allocated, the interrupt
1351  * handler is registered with the OS, the watchdog task is started,
1352  * and the stack is notified that the interface is ready.
1353  **/
1354 int e1000_open(struct net_device *netdev)
1355 {
1356         struct e1000_adapter *adapter = netdev_priv(netdev);
1357         struct e1000_hw *hw = &adapter->hw;
1358         int err;
1359
1360         /* disallow open during test */
1361         if (test_bit(__E1000_TESTING, &adapter->flags))
1362                 return -EBUSY;
1363
1364         netif_carrier_off(netdev);
1365
1366         /* allocate transmit descriptors */
1367         err = e1000_setup_all_tx_resources(adapter);
1368         if (err)
1369                 goto err_setup_tx;
1370
1371         /* allocate receive descriptors */
1372         err = e1000_setup_all_rx_resources(adapter);
1373         if (err)
1374                 goto err_setup_rx;
1375
1376         e1000_power_up_phy(adapter);
1377
1378         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1379         if ((hw->mng_cookie.status &
1380                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1381                 e1000_update_mng_vlan(adapter);
1382         }
1383
1384         /* before we allocate an interrupt, we must be ready to handle it.
1385          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1386          * as soon as we call pci_request_irq, so we have to setup our
1387          * clean_rx handler before we do so.
1388          */
1389         e1000_configure(adapter);
1390
1391         err = e1000_request_irq(adapter);
1392         if (err)
1393                 goto err_req_irq;
1394
1395         /* From here on the code is the same as e1000_up() */
1396         clear_bit(__E1000_DOWN, &adapter->flags);
1397
1398         napi_enable(&adapter->napi);
1399
1400         e1000_irq_enable(adapter);
1401
1402         netif_start_queue(netdev);
1403
1404         /* fire a link status change interrupt to start the watchdog */
1405         ew32(ICS, E1000_ICS_LSC);
1406
1407         return E1000_SUCCESS;
1408
1409 err_req_irq:
1410         e1000_power_down_phy(adapter);
1411         e1000_free_all_rx_resources(adapter);
1412 err_setup_rx:
1413         e1000_free_all_tx_resources(adapter);
1414 err_setup_tx:
1415         e1000_reset(adapter);
1416
1417         return err;
1418 }
1419
1420 /**
1421  * e1000_close - Disables a network interface
1422  * @netdev: network interface device structure
1423  *
1424  * Returns 0, this is not allowed to fail
1425  *
1426  * The close entry point is called when an interface is de-activated
1427  * by the OS.  The hardware is still under the drivers control, but
1428  * needs to be disabled.  A global MAC reset is issued to stop the
1429  * hardware, and all transmit and receive resources are freed.
1430  **/
1431 int e1000_close(struct net_device *netdev)
1432 {
1433         struct e1000_adapter *adapter = netdev_priv(netdev);
1434         struct e1000_hw *hw = &adapter->hw;
1435         int count = E1000_CHECK_RESET_COUNT;
1436
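        /* Give any reset already in progress up to E1000_CHECK_RESET_COUNT
         * sleeps of 10-20 ms each to finish before taking the interface down.
         */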
1437         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1438                 usleep_range(10000, 20000);
1439
1440         WARN_ON(count < 0);
1441
1442         /* signal that we're down so that the reset task will no longer run */
1443         set_bit(__E1000_DOWN, &adapter->flags);
1444         clear_bit(__E1000_RESETTING, &adapter->flags);
1445
1446         e1000_down(adapter);
1447         e1000_power_down_phy(adapter);
1448         e1000_free_irq(adapter);
1449
1450         e1000_free_all_tx_resources(adapter);
1451         e1000_free_all_rx_resources(adapter);
1452
1453         /* kill manageability vlan ID if supported, but not if a vlan with
1454          * the same ID is registered on the host OS (let 8021q kill it)
1455          */
1456         if ((hw->mng_cookie.status &
1457              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1458             !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1459                 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1460                                        adapter->mng_vlan_id);
1461         }
1462
1463         return 0;
1464 }
1465
1466 /**
1467  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1468  * @adapter: address of board private structure
1469  * @start: address of beginning of memory
1470  * @len: length of memory
1471  **/
1472 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1473                                   unsigned long len)
1474 {
1475         struct e1000_hw *hw = &adapter->hw;
1476         unsigned long begin = (unsigned long)start;
1477         unsigned long end = begin + len;
1478
1479         /* Due to errata 23, first-rev 82545 and 82546 must not allow any
1480          * memory write location to cross a 64 KB boundary
1481          */
1482         if (hw->mac_type == e1000_82545 ||
1483             hw->mac_type == e1000_ce4100 ||
1484             hw->mac_type == e1000_82546) {
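                /* begin and (end - 1) share the same upper address bits
                 * (>> 16) only if the whole range fits in one 64 KB region.
                 */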
1485                 return ((begin ^ (end - 1)) >> 16) == 0;
1486         }
1487
1488         return true;
1489 }
1490
1491 /**
1492  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1493  * @adapter: board private structure
1494  * @txdr:    tx descriptor ring (for a specific queue) to setup
1495  *
1496  * Return 0 on success, negative on failure
1497  **/
1498 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1499                                     struct e1000_tx_ring *txdr)
1500 {
1501         struct pci_dev *pdev = adapter->pdev;
1502         int size;
1503
1504         size = sizeof(struct e1000_tx_buffer) * txdr->count;
1505         txdr->buffer_info = vzalloc(size);
1506         if (!txdr->buffer_info)
1507                 return -ENOMEM;
1508
1509         /* round up to nearest 4K */
1510
1511         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1512         txdr->size = ALIGN(txdr->size, 4096);
1513
1514         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1515                                         GFP_KERNEL);
1516         if (!txdr->desc) {
1517 setup_tx_desc_die:
1518                 vfree(txdr->buffer_info);
1519                 return -ENOMEM;
1520         }
1521
1522         /* Fix for errata 23, can't cross 64kB boundary */
1523         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1524                 void *olddesc = txdr->desc;
1525                 dma_addr_t olddma = txdr->dma;
1526                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1527                       txdr->size, txdr->desc);
1528                 /* Try again, without freeing the previous */
1529                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1530                                                 &txdr->dma, GFP_KERNEL);
1531                 /* Failed allocation, critical failure */
1532                 if (!txdr->desc) {
1533                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1534                                           olddma);
1535                         goto setup_tx_desc_die;
1536                 }
1537
1538                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1539                         /* give up */
1540                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1541                                           txdr->dma);
1542                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1543                                           olddma);
1544                         e_err(probe, "Unable to allocate aligned memory "
1545                               "for the transmit descriptor ring\n");
1546                         vfree(txdr->buffer_info);
1547                         return -ENOMEM;
1548                 } else {
1549                         /* Free old allocation, new allocation was successful */
1550                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1551                                           olddma);
1552                 }
1553         }
1554         memset(txdr->desc, 0, txdr->size);
1555
1556         txdr->next_to_use = 0;
1557         txdr->next_to_clean = 0;
1558
1559         return 0;
1560 }
1561
1562 /**
1563  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1564  *                                (Descriptors) for all queues
1565  * @adapter: board private structure
1566  *
1567  * Return 0 on success, negative on failure
1568  **/
1569 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1570 {
1571         int i, err = 0;
1572
1573         for (i = 0; i < adapter->num_tx_queues; i++) {
1574                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1575                 if (err) {
1576                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1577                         for (i-- ; i >= 0; i--)
1578                                 e1000_free_tx_resources(adapter,
1579                                                         &adapter->tx_ring[i]);
1580                         break;
1581                 }
1582         }
1583
1584         return err;
1585 }
1586
1587 /**
1588  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1589  * @adapter: board private structure
1590  *
1591  * Configure the Tx unit of the MAC after a reset.
1592  **/
1593 static void e1000_configure_tx(struct e1000_adapter *adapter)
1594 {
1595         u64 tdba;
1596         struct e1000_hw *hw = &adapter->hw;
1597         u32 tdlen, tctl, tipg;
1598         u32 ipgr1, ipgr2;
1599
1600         /* Setup the HW Tx Head and Tail descriptor pointers */
1601
1602         switch (adapter->num_tx_queues) {
1603         case 1:
1604         default:
1605                 tdba = adapter->tx_ring[0].dma;
1606                 tdlen = adapter->tx_ring[0].count *
1607                         sizeof(struct e1000_tx_desc);
1608                 ew32(TDLEN, tdlen);
1609                 ew32(TDBAH, (tdba >> 32));
1610                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1611                 ew32(TDT, 0);
1612                 ew32(TDH, 0);
1613                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1614                                            E1000_TDH : E1000_82542_TDH);
1615                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1616                                            E1000_TDT : E1000_82542_TDT);
1617                 break;
1618         }
1619
1620         /* Set the default values for the Tx Inter Packet Gap timer */
1621         if ((hw->media_type == e1000_media_type_fiber ||
1622              hw->media_type == e1000_media_type_internal_serdes))
1623                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1624         else
1625                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1626
1627         switch (hw->mac_type) {
1628         case e1000_82542_rev2_0:
1629         case e1000_82542_rev2_1:
1630                 tipg = DEFAULT_82542_TIPG_IPGT;
1631                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1632                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1633                 break;
1634         default:
1635                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1636                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1637                 break;
1638         }
1639         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1640         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1641         ew32(TIPG, tipg);
1642
1643         /* Set the Tx Interrupt Delay register */
1644
1645         ew32(TIDV, adapter->tx_int_delay);
1646         if (hw->mac_type >= e1000_82540)
1647                 ew32(TADV, adapter->tx_abs_int_delay);
1648
1649         /* Program the Transmit Control Register */
1650
1651         tctl = er32(TCTL);
1652         tctl &= ~E1000_TCTL_CT;
1653         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1654                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1655
1656         e1000_config_collision_dist(hw);
1657
1658         /* Setup Transmit Descriptor Settings for eop descriptor */
1659         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1660
1661         /* only set IDE if we are delaying interrupts using the timers */
1662         if (adapter->tx_int_delay)
1663                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1664
1665         if (hw->mac_type < e1000_82543)
1666                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1667         else
1668                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1669
1670         /* Cache if we're 82544 running in PCI-X because we'll
1671          * need this to apply a workaround later in the send path.
1672          */
1673         if (hw->mac_type == e1000_82544 &&
1674             hw->bus_type == e1000_bus_type_pcix)
1675                 adapter->pcix_82544 = true;
1676
1677         ew32(TCTL, tctl);
1678
1679 }
1680
1681 /**
1682  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1683  * @adapter: board private structure
1684  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1685  *
1686  * Returns 0 on success, negative on failure
1687  **/
1688 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1689                                     struct e1000_rx_ring *rxdr)
1690 {
1691         struct pci_dev *pdev = adapter->pdev;
1692         int size, desc_len;
1693
1694         size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1695         rxdr->buffer_info = vzalloc(size);
1696         if (!rxdr->buffer_info)
1697                 return -ENOMEM;
1698
1699         desc_len = sizeof(struct e1000_rx_desc);
1700
1701         /* Round up to nearest 4K */
1702
1703         rxdr->size = rxdr->count * desc_len;
1704         rxdr->size = ALIGN(rxdr->size, 4096);
1705
1706         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1707                                         GFP_KERNEL);
1708         if (!rxdr->desc) {
1709 setup_rx_desc_die:
1710                 vfree(rxdr->buffer_info);
1711                 return -ENOMEM;
1712         }
1713
1714         /* Fix for errata 23, can't cross 64kB boundary */
1715         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1716                 void *olddesc = rxdr->desc;
1717                 dma_addr_t olddma = rxdr->dma;
1718                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1719                       rxdr->size, rxdr->desc);
1720                 /* Try again, without freeing the previous */
1721                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1722                                                 &rxdr->dma, GFP_KERNEL);
1723                 /* Failed allocation, critical failure */
1724                 if (!rxdr->desc) {
1725                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1726                                           olddma);
1727                         goto setup_rx_desc_die;
1728                 }
1729
1730                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1731                         /* give up */
1732                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1733                                           rxdr->dma);
1734                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1735                                           olddma);
1736                         e_err(probe, "Unable to allocate aligned memory for "
1737                               "the Rx descriptor ring\n");
1738                         goto setup_rx_desc_die;
1739                 } else {
1740                         /* Free old allocation, new allocation was successful */
1741                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1742                                           olddma);
1743                 }
1744         }
1745         memset(rxdr->desc, 0, rxdr->size);
1746
1747         rxdr->next_to_clean = 0;
1748         rxdr->next_to_use = 0;
1749         rxdr->rx_skb_top = NULL;
1750
1751         return 0;
1752 }
1753
1754 /**
1755  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1756  *                                (Descriptors) for all queues
1757  * @adapter: board private structure
1758  *
1759  * Return 0 on success, negative on failure
1760  **/
1761 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1762 {
1763         int i, err = 0;
1764
1765         for (i = 0; i < adapter->num_rx_queues; i++) {
1766                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1767                 if (err) {
1768                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1769                         for (i-- ; i >= 0; i--)
1770                                 e1000_free_rx_resources(adapter,
1771                                                         &adapter->rx_ring[i]);
1772                         break;
1773                 }
1774         }
1775
1776         return err;
1777 }
1778
1779 /**
1780  * e1000_setup_rctl - configure the receive control registers
1781  * @adapter: Board private structure
1782  **/
1783 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1784 {
1785         struct e1000_hw *hw = &adapter->hw;
1786         u32 rctl;
1787
1788         rctl = er32(RCTL);
1789
1790         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1791
1792         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1793                 E1000_RCTL_RDMTS_HALF |
1794                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1795
1796         if (hw->tbi_compatibility_on == 1)
1797                 rctl |= E1000_RCTL_SBP;
1798         else
1799                 rctl &= ~E1000_RCTL_SBP;
1800
1801         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1802                 rctl &= ~E1000_RCTL_LPE;
1803         else
1804                 rctl |= E1000_RCTL_LPE;
1805
1806         /* Setup buffer sizes */
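        /* Start from the BSEX (buffer size extension) encodings; the
         * 2048-byte default below switches back to the base encoding by
         * clearing BSEX again.
         */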
1807         rctl &= ~E1000_RCTL_SZ_4096;
1808         rctl |= E1000_RCTL_BSEX;
1809         switch (adapter->rx_buffer_len) {
1810         case E1000_RXBUFFER_2048:
1811         default:
1812                 rctl |= E1000_RCTL_SZ_2048;
1813                 rctl &= ~E1000_RCTL_BSEX;
1814                 break;
1815         case E1000_RXBUFFER_4096:
1816                 rctl |= E1000_RCTL_SZ_4096;
1817                 break;
1818         case E1000_RXBUFFER_8192:
1819                 rctl |= E1000_RCTL_SZ_8192;
1820                 break;
1821         case E1000_RXBUFFER_16384:
1822                 rctl |= E1000_RCTL_SZ_16384;
1823                 break;
1824         }
1825
1826         /* This is useful for sniffing bad packets. */
1827         if (adapter->netdev->features & NETIF_F_RXALL) {
1828                 /* UPE and MPE will be handled by normal PROMISC logic
1829                  * in e1000_set_rx_mode
1830                  */
1831                 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1832                          E1000_RCTL_BAM | /* RX All Bcast Pkts */
1833                          E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1834
1835                 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1836                           E1000_RCTL_DPF | /* Allow filtered pause */
1837                           E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1838                 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1839                  * and that breaks VLANs.
1840                  */
1841         }
1842
1843         ew32(RCTL, rctl);
1844 }
1845
1846 /**
1847  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1848  * @adapter: board private structure
1849  *
1850  * Configure the Rx unit of the MAC after a reset.
1851  **/
1852 static void e1000_configure_rx(struct e1000_adapter *adapter)
1853 {
1854         u64 rdba;
1855         struct e1000_hw *hw = &adapter->hw;
1856         u32 rdlen, rctl, rxcsum;
1857
1858         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1859                 rdlen = adapter->rx_ring[0].count *
1860                         sizeof(struct e1000_rx_desc);
1861                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1862                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1863         } else {
1864                 rdlen = adapter->rx_ring[0].count *
1865                         sizeof(struct e1000_rx_desc);
1866                 adapter->clean_rx = e1000_clean_rx_irq;
1867                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1868         }
1869
1870         /* disable receives while setting up the descriptors */
1871         rctl = er32(RCTL);
1872         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1873
1874         /* set the Receive Delay Timer Register */
1875         ew32(RDTR, adapter->rx_int_delay);
1876
1877         if (hw->mac_type >= e1000_82540) {
1878                 ew32(RADV, adapter->rx_abs_int_delay);
1879                 if (adapter->itr_setting != 0)
1880                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1881         }
1882
1883         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1884          * the Base and Length of the Rx Descriptor Ring
1885          */
1886         switch (adapter->num_rx_queues) {
1887         case 1:
1888         default:
1889                 rdba = adapter->rx_ring[0].dma;
1890                 ew32(RDLEN, rdlen);
1891                 ew32(RDBAH, (rdba >> 32));
1892                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1893                 ew32(RDT, 0);
1894                 ew32(RDH, 0);
1895                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1896                                            E1000_RDH : E1000_82542_RDH);
1897                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1898                                            E1000_RDT : E1000_82542_RDT);
1899                 break;
1900         }
1901
1902         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1903         if (hw->mac_type >= e1000_82543) {
1904                 rxcsum = er32(RXCSUM);
1905                 if (adapter->rx_csum)
1906                         rxcsum |= E1000_RXCSUM_TUOFL;
1907                 else
1908                         /* don't need to clear IPPCSE as it defaults to 0 */
1909                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1910                 ew32(RXCSUM, rxcsum);
1911         }
1912
1913         /* Enable Receives */
1914         ew32(RCTL, rctl | E1000_RCTL_EN);
1915 }
1916
1917 /**
1918  * e1000_free_tx_resources - Free Tx Resources per Queue
1919  * @adapter: board private structure
1920  * @tx_ring: Tx descriptor ring for a specific queue
1921  *
1922  * Free all transmit software resources
1923  **/
1924 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1925                                     struct e1000_tx_ring *tx_ring)
1926 {
1927         struct pci_dev *pdev = adapter->pdev;
1928
1929         e1000_clean_tx_ring(adapter, tx_ring);
1930
1931         vfree(tx_ring->buffer_info);
1932         tx_ring->buffer_info = NULL;
1933
1934         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1935                           tx_ring->dma);
1936
1937         tx_ring->desc = NULL;
1938 }
1939
1940 /**
1941  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1942  * @adapter: board private structure
1943  *
1944  * Free all transmit software resources
1945  **/
1946 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1947 {
1948         int i;
1949
1950         for (i = 0; i < adapter->num_tx_queues; i++)
1951                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1952 }
1953
1954 static void
1955 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1956                                  struct e1000_tx_buffer *buffer_info)
1957 {
1958         if (buffer_info->dma) {
1959                 if (buffer_info->mapped_as_page)
1960                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1961                                        buffer_info->length, DMA_TO_DEVICE);
1962                 else
1963                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1964                                          buffer_info->length,
1965                                          DMA_TO_DEVICE);
1966                 buffer_info->dma = 0;
1967         }
1968         if (buffer_info->skb) {
1969                 dev_kfree_skb_any(buffer_info->skb);
1970                 buffer_info->skb = NULL;
1971         }
1972         buffer_info->time_stamp = 0;
1973         /* buffer_info must be completely set up in the transmit path */
1974 }
1975
1976 /**
1977  * e1000_clean_tx_ring - Free Tx Buffers
1978  * @adapter: board private structure
1979  * @tx_ring: ring to be cleaned
1980  **/
1981 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1982                                 struct e1000_tx_ring *tx_ring)
1983 {
1984         struct e1000_hw *hw = &adapter->hw;
1985         struct e1000_tx_buffer *buffer_info;
1986         unsigned long size;
1987         unsigned int i;
1988
1989         /* Free all the Tx ring sk_buffs */
1990
1991         for (i = 0; i < tx_ring->count; i++) {
1992                 buffer_info = &tx_ring->buffer_info[i];
1993                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1994         }
1995
1996         netdev_reset_queue(adapter->netdev);
1997         size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1998         memset(tx_ring->buffer_info, 0, size);
1999
2000         /* Zero out the descriptor ring */
2001
2002         memset(tx_ring->desc, 0, tx_ring->size);
2003
2004         tx_ring->next_to_use = 0;
2005         tx_ring->next_to_clean = 0;
2006         tx_ring->last_tx_tso = false;
2007
2008         writel(0, hw->hw_addr + tx_ring->tdh);
2009         writel(0, hw->hw_addr + tx_ring->tdt);
2010 }
2011
2012 /**
2013  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2014  * @adapter: board private structure
2015  **/
2016 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2017 {
2018         int i;
2019
2020         for (i = 0; i < adapter->num_tx_queues; i++)
2021                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2022 }
2023
2024 /**
2025  * e1000_free_rx_resources - Free Rx Resources
2026  * @adapter: board private structure
2027  * @rx_ring: ring to clean the resources from
2028  *
2029  * Free all receive software resources
2030  **/
2031 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2032                                     struct e1000_rx_ring *rx_ring)
2033 {
2034         struct pci_dev *pdev = adapter->pdev;
2035
2036         e1000_clean_rx_ring(adapter, rx_ring);
2037
2038         vfree(rx_ring->buffer_info);
2039         rx_ring->buffer_info = NULL;
2040
2041         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2042                           rx_ring->dma);
2043
2044         rx_ring->desc = NULL;
2045 }
2046
2047 /**
2048  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2049  * @adapter: board private structure
2050  *
2051  * Free all receive software resources
2052  **/
2053 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2054 {
2055         int i;
2056
2057         for (i = 0; i < adapter->num_rx_queues; i++)
2058                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2059 }
2060
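/* Rx buffers are carved from page fragments: reserve standard skb headroom in
 * front of the hardware buffer and leave room for struct skb_shared_info at
 * the end, so a received frag can later be wrapped in an skb without copying.
 */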
2061 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2062 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2063 {
2064         return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2065                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2066 }
2067
2068 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2069 {
2070         unsigned int len = e1000_frag_len(a);
2071         u8 *data = netdev_alloc_frag(len);
2072
2073         if (likely(data))
2074                 data += E1000_HEADROOM;
2075         return data;
2076 }
2077
2078 /**
2079  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2080  * @adapter: board private structure
2081  * @rx_ring: ring to free buffers from
2082  **/
2083 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2084                                 struct e1000_rx_ring *rx_ring)
2085 {
2086         struct e1000_hw *hw = &adapter->hw;
2087         struct e1000_rx_buffer *buffer_info;
2088         struct pci_dev *pdev = adapter->pdev;
2089         unsigned long size;
2090         unsigned int i;
2091
2092         /* Free all the Rx netfrags */
2093         for (i = 0; i < rx_ring->count; i++) {
2094                 buffer_info = &rx_ring->buffer_info[i];
2095                 if (adapter->clean_rx == e1000_clean_rx_irq) {
2096                         if (buffer_info->dma)
2097                                 dma_unmap_single(&pdev->dev, buffer_info->dma,
2098                                                  adapter->rx_buffer_len,
2099                                                  DMA_FROM_DEVICE);
2100                         if (buffer_info->rxbuf.data) {
2101                                 skb_free_frag(buffer_info->rxbuf.data);
2102                                 buffer_info->rxbuf.data = NULL;
2103                         }
2104                 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2105                         if (buffer_info->dma)
2106                                 dma_unmap_page(&pdev->dev, buffer_info->dma,
2107                                                adapter->rx_buffer_len,
2108                                                DMA_FROM_DEVICE);
2109                         if (buffer_info->rxbuf.page) {
2110                                 put_page(buffer_info->rxbuf.page);
2111                                 buffer_info->rxbuf.page = NULL;
2112                         }
2113                 }
2114
2115                 buffer_info->dma = 0;
2116         }
2117
2118         /* there also may be some cached data from a chained receive */
2119         napi_free_frags(&adapter->napi);
2120         rx_ring->rx_skb_top = NULL;
2121
2122         size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2123         memset(rx_ring->buffer_info, 0, size);
2124
2125         /* Zero out the descriptor ring */
2126         memset(rx_ring->desc, 0, rx_ring->size);
2127
2128         rx_ring->next_to_clean = 0;
2129         rx_ring->next_to_use = 0;
2130
2131         writel(0, hw->hw_addr + rx_ring->rdh);
2132         writel(0, hw->hw_addr + rx_ring->rdt);
2133 }
2134
2135 /**
2136  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2137  * @adapter: board private structure
2138  **/
2139 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2140 {
2141         int i;
2142
2143         for (i = 0; i < adapter->num_rx_queues; i++)
2144                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2145 }
2146
2147 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2148  * and memory write and invalidate disabled for certain operations
2149  */
2150 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2151 {
2152         struct e1000_hw *hw = &adapter->hw;
2153         struct net_device *netdev = adapter->netdev;
2154         u32 rctl;
2155
2156         e1000_pci_clear_mwi(hw);
2157
2158         rctl = er32(RCTL);
2159         rctl |= E1000_RCTL_RST;
2160         ew32(RCTL, rctl);
2161         E1000_WRITE_FLUSH();
2162         mdelay(5);
2163
2164         if (netif_running(netdev))
2165                 e1000_clean_all_rx_rings(adapter);
2166 }
2167
2168 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2169 {
2170         struct e1000_hw *hw = &adapter->hw;
2171         struct net_device *netdev = adapter->netdev;
2172         u32 rctl;
2173
2174         rctl = er32(RCTL);
2175         rctl &= ~E1000_RCTL_RST;
2176         ew32(RCTL, rctl);
2177         E1000_WRITE_FLUSH();
2178         mdelay(5);
2179
2180         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2181                 e1000_pci_set_mwi(hw);
2182
2183         if (netif_running(netdev)) {
2184                 /* No need to loop, because 82542 supports only 1 queue */
2185                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2186                 e1000_configure_rx(adapter);
2187                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2188         }
2189 }
2190
2191 /**
2192  * e1000_set_mac - Change the Ethernet Address of the NIC
2193  * @netdev: network interface device structure
2194  * @p: pointer to an address structure
2195  *
2196  * Returns 0 on success, negative on failure
2197  **/
2198 static int e1000_set_mac(struct net_device *netdev, void *p)
2199 {
2200         struct e1000_adapter *adapter = netdev_priv(netdev);
2201         struct e1000_hw *hw = &adapter->hw;
2202         struct sockaddr *addr = p;
2203
2204         if (!is_valid_ether_addr(addr->sa_data))
2205                 return -EADDRNOTAVAIL;
2206
2207         /* 82542 2.0 needs to be in reset to write receive address registers */
2208
2209         if (hw->mac_type == e1000_82542_rev2_0)
2210                 e1000_enter_82542_rst(adapter);
2211
2212         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2213         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2214
2215         e1000_rar_set(hw, hw->mac_addr, 0);
2216
2217         if (hw->mac_type == e1000_82542_rev2_0)
2218                 e1000_leave_82542_rst(adapter);
2219
2220         return 0;
2221 }
2222
2223 /**
2224  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2225  * @netdev: network interface device structure
2226  *
2227  * The set_rx_mode entry point is called whenever the unicast or multicast
2228  * address lists or the network interface flags are updated. This routine is
2229  * responsible for configuring the hardware for proper unicast, multicast,
2230  * promiscuous mode, and all-multi behavior.
2231  **/
2232 static void e1000_set_rx_mode(struct net_device *netdev)
2233 {
2234         struct e1000_adapter *adapter = netdev_priv(netdev);
2235         struct e1000_hw *hw = &adapter->hw;
2236         struct netdev_hw_addr *ha;
2237         bool use_uc = false;
2238         u32 rctl;
2239         u32 hash_value;
2240         int i, rar_entries = E1000_RAR_ENTRIES;
2241         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2242         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2243
2244         if (!mcarray)
2245                 return;
2246
2247         /* Check for Promiscuous and All Multicast modes */
2248
2249         rctl = er32(RCTL);
2250
2251         if (netdev->flags & IFF_PROMISC) {
2252                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2253                 rctl &= ~E1000_RCTL_VFE;
2254         } else {
2255                 if (netdev->flags & IFF_ALLMULTI)
2256                         rctl |= E1000_RCTL_MPE;
2257                 else
2258                         rctl &= ~E1000_RCTL_MPE;
2259                 /* Enable VLAN filter if there is a VLAN */
2260                 if (e1000_vlan_used(adapter))
2261                         rctl |= E1000_RCTL_VFE;
2262         }
2263
2264         if (netdev_uc_count(netdev) > rar_entries - 1) {
2265                 rctl |= E1000_RCTL_UPE;
2266         } else if (!(netdev->flags & IFF_PROMISC)) {
2267                 rctl &= ~E1000_RCTL_UPE;
2268                 use_uc = true;
2269         }
2270
2271         ew32(RCTL, rctl);
2272
2273         /* 82542 2.0 needs to be in reset to write receive address registers */
2274
2275         if (hw->mac_type == e1000_82542_rev2_0)
2276                 e1000_enter_82542_rst(adapter);
2277
2278         /* load the first 14 addresses into the exact filters 1-14. Unicast
2279          * addresses take precedence to avoid disabling unicast filtering
2280          * when possible.
2281          *
2282          * RAR 0 is used for the station MAC address.
2283          * If there are fewer than 14 addresses, go ahead and clear the unused filters
2284          */
2285         i = 1;
2286         if (use_uc)
2287                 netdev_for_each_uc_addr(ha, netdev) {
2288                         if (i == rar_entries)
2289                                 break;
2290                         e1000_rar_set(hw, ha->addr, i++);
2291                 }
2292
2293         netdev_for_each_mc_addr(ha, netdev) {
2294                 if (i == rar_entries) {
2295                         /* load any remaining addresses into the hash table */
2296                         u32 hash_reg, hash_bit, mta;
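                        /* bits [11:5] of the hash select the MTA register,
                         * bits [4:0] select the bit within that register
                         */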
2297                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2298                         hash_reg = (hash_value >> 5) & 0x7F;
2299                         hash_bit = hash_value & 0x1F;
2300                         mta = (1 << hash_bit);
2301                         mcarray[hash_reg] |= mta;
2302                 } else {
2303                         e1000_rar_set(hw, ha->addr, i++);
2304                 }
2305         }
2306
2307         for (; i < rar_entries; i++) {
2308                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2309                 E1000_WRITE_FLUSH();
2310                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2311                 E1000_WRITE_FLUSH();
2312         }
2313
2314         /* write the hash table completely; writing from the bottom avoids
2315          * both stupid write-combining chipsets and flushing each write
2316          */
2317         for (i = mta_reg_count - 1; i >= 0 ; i--) {
2318                 /* The 82544 has an errata where writing odd offsets
2319                  * overwrites the previous even offset; writing backwards
2320                  * over the range works around this by always writing the
2321                  * odd offset first
2322                  */
2323                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2324         }
2325         E1000_WRITE_FLUSH();
2326
2327         if (hw->mac_type == e1000_82542_rev2_0)
2328                 e1000_leave_82542_rst(adapter);
2329
2330         kfree(mcarray);
2331 }
2332
2333 /**
2334  * e1000_update_phy_info_task - get phy info
2335  * @work: work struct contained inside adapter struct
2336  *
2337  * Need to wait a few seconds after link up to get diagnostic information from
2338  * the phy
2339  */
2340 static void e1000_update_phy_info_task(struct work_struct *work)
2341 {
2342         struct e1000_adapter *adapter = container_of(work,
2343                                                      struct e1000_adapter,
2344                                                      phy_info_task.work);
2345
2346         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2347 }
2348
2349 /**
2350  * e1000_82547_tx_fifo_stall_task - task to complete work
2351  * @work: work struct contained inside adapter struct
2352  **/
2353 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2354 {
2355         struct e1000_adapter *adapter = container_of(work,
2356                                                      struct e1000_adapter,
2357                                                      fifo_stall_task.work);
2358         struct e1000_hw *hw = &adapter->hw;
2359         struct net_device *netdev = adapter->netdev;
2360         u32 tctl;
2361
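        /* Only restart transmits once both the descriptor ring (TDT/TDH) and
         * the on-chip Tx FIFO (TDFT/TDFH, TDFTS/TDFHS) have fully drained;
         * then rewind the FIFO pointers and re-enable the transmitter.
         */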
2362         if (atomic_read(&adapter->tx_fifo_stall)) {
2363                 if ((er32(TDT) == er32(TDH)) &&
2364                    (er32(TDFT) == er32(TDFH)) &&
2365                    (er32(TDFTS) == er32(TDFHS))) {
2366                         tctl = er32(TCTL);
2367                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2368                         ew32(TDFT, adapter->tx_head_addr);
2369                         ew32(TDFH, adapter->tx_head_addr);
2370                         ew32(TDFTS, adapter->tx_head_addr);
2371                         ew32(TDFHS, adapter->tx_head_addr);
2372                         ew32(TCTL, tctl);
2373                         E1000_WRITE_FLUSH();
2374
2375                         adapter->tx_fifo_head = 0;
2376                         atomic_set(&adapter->tx_fifo_stall, 0);
2377                         netif_wake_queue(netdev);
2378                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2379                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2380                 }
2381         }
2382 }
2383
2384 bool e1000_has_link(struct e1000_adapter *adapter)
2385 {
2386         struct e1000_hw *hw = &adapter->hw;
2387         bool link_active = false;
2388
2389         /* get_link_status is set on LSC (link status) interrupt or rx
2390          * sequence error interrupt (except on intel ce4100).
2391          * get_link_status stays set until
2392          * e1000_check_for_link establishes link, and that happens for
2393          * copper adapters ONLY
2394          */
2395         switch (hw->media_type) {
2396         case e1000_media_type_copper:
2397                 if (hw->mac_type == e1000_ce4100)
2398                         hw->get_link_status = 1;
2399                 if (hw->get_link_status) {
2400                         e1000_check_for_link(hw);
2401                         link_active = !hw->get_link_status;
2402                 } else {
2403                         link_active = true;
2404                 }
2405                 break;
2406         case e1000_media_type_fiber:
2407                 e1000_check_for_link(hw);
2408                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2409                 break;
2410         case e1000_media_type_internal_serdes:
2411                 e1000_check_for_link(hw);
2412                 link_active = hw->serdes_has_link;
2413                 break;
2414         default:
2415                 break;
2416         }
2417
2418         return link_active;
2419 }
2420
2421 /**
2422  * e1000_watchdog - work function
2423  * @work: work struct contained inside adapter struct
2424  **/
2425 static void e1000_watchdog(struct work_struct *work)
2426 {
2427         struct e1000_adapter *adapter = container_of(work,
2428                                                      struct e1000_adapter,
2429                                                      watchdog_task.work);
2430         struct e1000_hw *hw = &adapter->hw;
2431         struct net_device *netdev = adapter->netdev;
2432         struct e1000_tx_ring *txdr = adapter->tx_ring;
2433         u32 link, tctl;
2434
2435         link = e1000_has_link(adapter);
2436         if ((netif_carrier_ok(netdev)) && link)
2437                 goto link_up;
2438
2439         if (link) {
2440                 if (!netif_carrier_ok(netdev)) {
2441                         u32 ctrl;
2442                         /* update snapshot of PHY registers on LSC */
2443                         e1000_get_speed_and_duplex(hw,
2444                                                    &adapter->link_speed,
2445                                                    &adapter->link_duplex);
2446
2447                         ctrl = er32(CTRL);
2448                         pr_info("%s NIC Link is Up %d Mbps %s, "
2449                                 "Flow Control: %s\n",
2450                                 netdev->name,
2451                                 adapter->link_speed,
2452                                 adapter->link_duplex == FULL_DUPLEX ?
2453                                 "Full Duplex" : "Half Duplex",
2454                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2455                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2456                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2457                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2458
2459                         /* adjust timeout factor according to speed/duplex */
2460                         adapter->tx_timeout_factor = 1;
2461                         switch (adapter->link_speed) {
2462                         case SPEED_10:
2463                                 adapter->tx_timeout_factor = 16;
2464                                 break;
2465                         case SPEED_100:
2466                                 /* maybe add some timeout factor ? */
2467                                 break;
2468                         }
2469
2470                         /* enable transmits in the hardware */
2471                         tctl = er32(TCTL);
2472                         tctl |= E1000_TCTL_EN;
2473                         ew32(TCTL, tctl);
2474
2475                         netif_carrier_on(netdev);
2476                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2477                                 schedule_delayed_work(&adapter->phy_info_task,
2478                                                       2 * HZ);
2479                         adapter->smartspeed = 0;
2480                 }
2481         } else {
2482                 if (netif_carrier_ok(netdev)) {
2483                         adapter->link_speed = 0;
2484                         adapter->link_duplex = 0;
2485                         pr_info("%s NIC Link is Down\n",
2486                                 netdev->name);
2487                         netif_carrier_off(netdev);
2488
2489                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2490                                 schedule_delayed_work(&adapter->phy_info_task,
2491                                                       2 * HZ);
2492                 }
2493
2494                 e1000_smartspeed(adapter);
2495         }
2496
2497 link_up:
2498         e1000_update_stats(adapter);
2499
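        /* per-interval deltas: the packet and collision deltas feed
         * e1000_update_adaptive() below, the good-octet deltas feed the
         * simple ITR mode further down
         */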
2500         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2501         adapter->tpt_old = adapter->stats.tpt;
2502         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2503         adapter->colc_old = adapter->stats.colc;
2504
2505         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2506         adapter->gorcl_old = adapter->stats.gorcl;
2507         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2508         adapter->gotcl_old = adapter->stats.gotcl;
2509
2510         e1000_update_adaptive(hw);
2511
2512         if (!netif_carrier_ok(netdev)) {
2513                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2514                         /* We've lost link, so the controller stops DMA,
2515                          * but we've got queued Tx work that's never going
2516                          * to get done, so reset controller to flush Tx.
2517                          * (Do the reset outside of interrupt context).
2518                          */
2519                         adapter->tx_timeout_count++;
2520                         schedule_work(&adapter->reset_task);
2521                         /* exit immediately since reset is imminent */
2522                         return;
2523                 }
2524         }
2525
2526         /* Simple mode for Interrupt Throttle Rate (ITR) */
2527         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2528                 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2529                  * Total asymmetrical Tx or Rx gets ITR=8000;
2530                  * everyone else is between 2000-8000.
2531                  */
2532                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2533                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2534                             adapter->gotcl - adapter->gorcl :
2535                             adapter->gorcl - adapter->gotcl) / 10000;
2536                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2537
2538                 ew32(ITR, 1000000000 / (itr * 256));
2539         }
2540
2541         /* Cause software interrupt to ensure rx ring is cleaned */
2542         ew32(ICS, E1000_ICS_RXDMT0);
2543
2544         /* Force detection of hung controller every watchdog period */
2545         adapter->detect_tx_hung = true;
2546
2547         /* Reschedule the task */
2548         if (!test_bit(__E1000_DOWN, &adapter->flags))
2549                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2550 }
2551
2552 enum latency_range {
2553         lowest_latency = 0,
2554         low_latency = 1,
2555         bulk_latency = 2,
2556         latency_invalid = 255
2557 };
2558
2559 /**
2560  * e1000_update_itr - update the dynamic ITR value based on statistics
2561  * @adapter: pointer to adapter
2562  * @itr_setting: current adapter->itr
2563  * @packets: the number of packets during this measurement interval
2564  * @bytes: the number of bytes during this measurement interval
2565  *
2566  *      Stores a new ITR value based on packets and byte
2567  *      counts during the last interrupt.  The advantage of per interrupt
2568  *      computation is faster updates and more accurate ITR for the current
2569  *      traffic pattern.  Constants in this function were computed
2570  *      based on theoretical maximum wire speed and thresholds were set based
2571  *      on testing data as well as attempting to minimize response time
2572  *      while increasing bulk throughput.
2573  *      this functionality is controlled by the InterruptThrottleRate module
2574  *      parameter (see e1000_param.c)
2575  **/
2576 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2577                                      u16 itr_setting, int packets, int bytes)
2578 {
2579         unsigned int retval = itr_setting;
2580         struct e1000_hw *hw = &adapter->hw;
2581
2582         if (unlikely(hw->mac_type < e1000_82540))
2583                 goto update_itr_done;
2584
2585         if (packets == 0)
2586                 goto update_itr_done;
2587
2588         switch (itr_setting) {
2589         case lowest_latency:
2590                 /* jumbo frames get bulk treatment */
2591                 if (bytes/packets > 8000)
2592                         retval = bulk_latency;
2593                 else if ((packets < 5) && (bytes > 512))
2594                         retval = low_latency;
2595                 break;
2596         case low_latency:  /* 50 usec aka 20000 ints/s */
2597                 if (bytes > 10000) {
2598                         /* jumbo frames need bulk latency setting */
2599                         if (bytes/packets > 8000)
2600                                 retval = bulk_latency;
2601                         else if ((packets < 10) || ((bytes/packets) > 1200))
2602                                 retval = bulk_latency;
2603                         else if ((packets > 35))
2604                                 retval = lowest_latency;
2605                 } else if (bytes/packets > 2000)
2606                         retval = bulk_latency;
2607                 else if (packets <= 2 && bytes < 512)
2608                         retval = lowest_latency;
2609                 break;
2610         case bulk_latency: /* 250 usec aka 4000 ints/s */
2611                 if (bytes > 25000) {
2612                         if (packets > 35)
2613                                 retval = low_latency;
2614                 } else if (bytes < 6000) {
2615                         retval = low_latency;
2616                 }
2617                 break;
2618         }
2619
2620 update_itr_done:
2621         return retval;
2622 }
2623
2624 static void e1000_set_itr(struct e1000_adapter *adapter)
2625 {
2626         struct e1000_hw *hw = &adapter->hw;
2627         u16 current_itr;
2628         u32 new_itr = adapter->itr;
2629
2630         if (unlikely(hw->mac_type < e1000_82540))
2631                 return;
2632
2633         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2634         if (unlikely(adapter->link_speed != SPEED_1000)) {
2635                 new_itr = 4000;
2636                 goto set_itr_now;
2637         }
2638
2639         adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2640                                            adapter->total_tx_packets,
2641                                            adapter->total_tx_bytes);
2642         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2643         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2644                 adapter->tx_itr = low_latency;
2645
2646         adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2647                                            adapter->total_rx_packets,
2648                                            adapter->total_rx_bytes);
2649         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2650         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2651                 adapter->rx_itr = low_latency;
2652
2653         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2654
2655         switch (current_itr) {
2656         /* counts and packets in update_itr are dependent on these numbers */
2657         case lowest_latency:
2658                 new_itr = 70000;
2659                 break;
2660         case low_latency:
2661                 new_itr = 20000; /* aka hwitr = ~200 */
2662                 break;
2663         case bulk_latency:
2664                 new_itr = 4000;
2665                 break;
2666         default:
2667                 break;
2668         }
2669
2670 set_itr_now:
2671         if (new_itr != adapter->itr) {
2672                 /* this attempts to bias the interrupt rate towards Bulk
2673                  * by adding intermediate steps when interrupt rate is
2674                  * increasing
2675                  */
2676                 new_itr = new_itr > adapter->itr ?
2677                           min(adapter->itr + (new_itr >> 2), new_itr) :
2678                           new_itr;
2679                 adapter->itr = new_itr;
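                /* the ITR register takes an interval in 256 ns units, so
                 * convert the interrupts-per-second target accordingly
                 */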
2680                 ew32(ITR, 1000000000 / (new_itr * 256));
2681         }
2682 }
2683
2684 #define E1000_TX_FLAGS_CSUM             0x00000001
2685 #define E1000_TX_FLAGS_VLAN             0x00000002
2686 #define E1000_TX_FLAGS_TSO              0x00000004
2687 #define E1000_TX_FLAGS_IPV4             0x00000008
2688 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2689 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2690 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2691
2692 static int e1000_tso(struct e1000_adapter *adapter,
2693                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2694                      __be16 protocol)
2695 {
2696         struct e1000_context_desc *context_desc;
2697         struct e1000_tx_buffer *buffer_info;
2698         unsigned int i;
2699         u32 cmd_length = 0;
2700         u16 ipcse = 0, tucse, mss;
2701         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2702
2703         if (skb_is_gso(skb)) {
2704                 int err;
2705
2706                 err = skb_cow_head(skb, 0);
2707                 if (err < 0)
2708                         return err;
2709
2710                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2711                 mss = skb_shinfo(skb)->gso_size;
2712                 if (protocol == htons(ETH_P_IP)) {
2713                         struct iphdr *iph = ip_hdr(skb);
2714                         iph->tot_len = 0;
2715                         iph->check = 0;
2716                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2717                                                                  iph->daddr, 0,
2718                                                                  IPPROTO_TCP,
2719                                                                  0);
2720                         cmd_length = E1000_TXD_CMD_IP;
2721                         ipcse = skb_transport_offset(skb) - 1;
2722                 } else if (skb_is_gso_v6(skb)) {
2723                         tcp_v6_gso_csum_prep(skb);
2724                         ipcse = 0;
2725                 }
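                /* checksum context for the hardware: *css is where
                 * checksumming starts, *cso is where the result is inserted,
                 * *cse is where it ends (0 means "to the end of the packet")
                 */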
2726                 ipcss = skb_network_offset(skb);
2727                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2728                 tucss = skb_transport_offset(skb);
2729                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2730                 tucse = 0;
2731
2732                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2733                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2734
2735                 i = tx_ring->next_to_use;
2736                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2737                 buffer_info = &tx_ring->buffer_info[i];
2738
2739                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2740                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2741                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2742                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2743                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2744                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2745                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2746                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2747                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2748
2749                 buffer_info->time_stamp = jiffies;
2750                 buffer_info->next_to_watch = i;
2751
2752                 if (++i == tx_ring->count)
2753                         i = 0;
2754
2755                 tx_ring->next_to_use = i;
2756
2757                 return true;
2758         }
2759         return false;
2760 }
2761
2762 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2763                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2764                           __be16 protocol)
2765 {
2766         struct e1000_context_desc *context_desc;
2767         struct e1000_tx_buffer *buffer_info;
2768         unsigned int i;
2769         u8 css;
2770         u32 cmd_len = E1000_TXD_CMD_DEXT;
2771
2772         if (skb->ip_summed != CHECKSUM_PARTIAL)
2773                 return false;
2774
2775         switch (protocol) {
2776         case cpu_to_be16(ETH_P_IP):
2777                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2778                         cmd_len |= E1000_TXD_CMD_TCP;
2779                 break;
2780         case cpu_to_be16(ETH_P_IPV6):
2781                 /* XXX not handling all IPV6 headers */
2782                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2783                         cmd_len |= E1000_TXD_CMD_TCP;
2784                 break;
2785         default:
2786                 if (unlikely(net_ratelimit()))
2787                         e_warn(drv, "checksum_partial proto=%x!\n",
2788                                skb->protocol);
2789                 break;
2790         }
2791
2792         css = skb_checksum_start_offset(skb);
2793
2794         i = tx_ring->next_to_use;
2795         buffer_info = &tx_ring->buffer_info[i];
2796         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2797
2798         context_desc->lower_setup.ip_config = 0;
2799         context_desc->upper_setup.tcp_fields.tucss = css;
2800         context_desc->upper_setup.tcp_fields.tucso =
2801                 css + skb->csum_offset;
2802         context_desc->upper_setup.tcp_fields.tucse = 0;
2803         context_desc->tcp_seg_setup.data = 0;
2804         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2805
2806         buffer_info->time_stamp = jiffies;
2807         buffer_info->next_to_watch = i;
2808
2809         if (unlikely(++i == tx_ring->count))
2810                 i = 0;
2811
2812         tx_ring->next_to_use = i;
2813
2814         return true;
2815 }
2816
2817 #define E1000_MAX_TXD_PWR       12
2818 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
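/* For reference: with E1000_MAX_TXD_PWR = 12, a single data descriptor
 * carries at most 1 << 12 = 4096 bytes, so larger buffers are split
 * across several descriptors by e1000_tx_map() below.
 */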
2819
2820 static int e1000_tx_map(struct e1000_adapter *adapter,
2821                         struct e1000_tx_ring *tx_ring,
2822                         struct sk_buff *skb, unsigned int first,
2823                         unsigned int max_per_txd, unsigned int nr_frags,
2824                         unsigned int mss)
2825 {
2826         struct e1000_hw *hw = &adapter->hw;
2827         struct pci_dev *pdev = adapter->pdev;
2828         struct e1000_tx_buffer *buffer_info;
2829         unsigned int len = skb_headlen(skb);
2830         unsigned int offset = 0, size, count = 0, i;
2831         unsigned int f, bytecount, segs;
2832
2833         i = tx_ring->next_to_use;
2834
2835         while (len) {
2836                 buffer_info = &tx_ring->buffer_info[i];
2837                 size = min(len, max_per_txd);
2838                 /* Workaround for controller erratum --
2839                  * the descriptor for a non-TSO packet in a linear skb that
2840                  * follows a TSO packet gets written back prematurely, before
2841                  * the data is fully DMA'd to the controller.
2842                  */
2843                 if (!skb->data_len && tx_ring->last_tx_tso &&
2844                     !skb_is_gso(skb)) {
2845                         tx_ring->last_tx_tso = false;
2846                         size -= 4;
2847                 }
2848
2849                 /* Workaround for premature desc write-backs
2850                  * in TSO mode.  Append 4-byte sentinel desc
2851                  */
2852                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2853                         size -= 4;
2854                 /* Workaround for errata 10; it applies
2855                  * to all controllers in PCI-X mode.
2856                  * The fix is to make sure that the first descriptor of a
2857                  * packet is smaller than 2048 - 16 - 16 (i.e. 2016) bytes.
2858                  */
2859                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2860                              (size > 2015) && count == 0))
2861                         size = 2015;
2862
2863                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2864                  * terminating buffers within evenly-aligned dwords.
2865                  */
2866                 if (unlikely(adapter->pcix_82544 &&
2867                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2868                    size > 4))
2869                         size -= 4;
2870
2871                 buffer_info->length = size;
2872                 /* set time_stamp *before* dma to help avoid a possible race */
2873                 buffer_info->time_stamp = jiffies;
2874                 buffer_info->mapped_as_page = false;
2875                 buffer_info->dma = dma_map_single(&pdev->dev,
2876                                                   skb->data + offset,
2877                                                   size, DMA_TO_DEVICE);
2878                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2879                         goto dma_error;
2880                 buffer_info->next_to_watch = i;
2881
2882                 len -= size;
2883                 offset += size;
2884                 count++;
2885                 if (len) {
2886                         i++;
2887                         if (unlikely(i == tx_ring->count))
2888                                 i = 0;
2889                 }
2890         }
2891
2892         for (f = 0; f < nr_frags; f++) {
2893                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2894
2895                 len = skb_frag_size(frag);
2896                 offset = 0;
2897
2898                 while (len) {
2899                         unsigned long bufend;
2900                         i++;
2901                         if (unlikely(i == tx_ring->count))
2902                                 i = 0;
2903
2904                         buffer_info = &tx_ring->buffer_info[i];
2905                         size = min(len, max_per_txd);
2906                         /* Workaround for premature desc write-backs
2907                          * in TSO mode.  Append 4-byte sentinel desc
2908                          */
2909                         if (unlikely(mss && f == (nr_frags-1) &&
2910                             size == len && size > 8))
2911                                 size -= 4;
2912                         /* Workaround for potential 82544 hang in PCI-X.
2913                          * Avoid terminating buffers within evenly-aligned
2914                          * dwords.
2915                          */
2916                         bufend = (unsigned long)
2917                                 page_to_phys(skb_frag_page(frag));
2918                         bufend += offset + size - 1;
2919                         if (unlikely(adapter->pcix_82544 &&
2920                                      !(bufend & 4) &&
2921                                      size > 4))
2922                                 size -= 4;
2923
2924                         buffer_info->length = size;
2925                         buffer_info->time_stamp = jiffies;
2926                         buffer_info->mapped_as_page = true;
2927                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2928                                                 offset, size, DMA_TO_DEVICE);
2929                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2930                                 goto dma_error;
2931                         buffer_info->next_to_watch = i;
2932
2933                         len -= size;
2934                         offset += size;
2935                         count++;
2936                 }
2937         }
2938
2939         segs = skb_shinfo(skb)->gso_segs ?: 1;
2940         /* count the linear (header) bytes replicated in each extra segment */
2941         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
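        /* Worked example (assuming the linear area holds only the headers):
         * a TSO skb with a 54-byte header, gso_segs = 5 and
         * skb->len = 54 + 5 * 1448 = 7294 gives
         * bytecount = 4 * 54 + 7294 = 7510, i.e. 5 wire segments of
         * 54 + 1448 = 1502 bytes each.
         */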
2942
2943         tx_ring->buffer_info[i].skb = skb;
2944         tx_ring->buffer_info[i].segs = segs;
2945         tx_ring->buffer_info[i].bytecount = bytecount;
2946         tx_ring->buffer_info[first].next_to_watch = i;
2947
2948         return count;
2949
2950 dma_error:
2951         dev_err(&pdev->dev, "TX DMA map failed\n");
2952         buffer_info->dma = 0;
2953         if (count)
2954                 count--;
2955
2956         while (count--) {
2957                 if (i == 0)
2958                         i += tx_ring->count;
2959                 i--;
2960                 buffer_info = &tx_ring->buffer_info[i];
2961                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2962         }
2963
2964         return 0;
2965 }
2966
2967 static void e1000_tx_queue(struct e1000_adapter *adapter,
2968                            struct e1000_tx_ring *tx_ring, int tx_flags,
2969                            int count)
2970 {
2971         struct e1000_tx_desc *tx_desc = NULL;
2972         struct e1000_tx_buffer *buffer_info;
2973         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2974         unsigned int i;
2975
2976         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2977                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2978                              E1000_TXD_CMD_TSE;
2979                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2980
2981                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2982                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2983         }
2984
2985         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2986                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2987                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2988         }
2989
2990         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2991                 txd_lower |= E1000_TXD_CMD_VLE;
2992                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2993         }
2994
2995         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2996                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
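        /* Illustrative summary of the flag handling above: a TSO IPv4 packet
         * with a VLAN tag ends up with DEXT, DTYP_D, TSE, IFCS and VLE set in
         * txd_lower, and with TXSM, IXSM and the VLAN tag in txd_upper.
         */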
2997
2998         i = tx_ring->next_to_use;
2999
3000         while (count--) {
3001                 buffer_info = &tx_ring->buffer_info[i];
3002                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3003                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3004                 tx_desc->lower.data =
3005                         cpu_to_le32(txd_lower | buffer_info->length);
3006                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3007                 if (unlikely(++i == tx_ring->count))
3008                         i = 0;
3009         }
3010
3011         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3012
3013         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3014         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3015                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3016
3017         /* Force memory writes to complete before letting h/w
3018          * know there are new descriptors to fetch.  (Only
3019          * applicable for weak-ordered memory model archs,
3020          * such as IA-64).
3021          */
3022         dma_wmb();
3023
3024         tx_ring->next_to_use = i;
3025 }
3026
3027 /* 82547 workaround to avoid controller hang in half-duplex environment.
3028  * The workaround is to avoid queuing a large packet that would span
3029  * the internal Tx FIFO ring boundary by notifying the stack to resend
3030  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3031  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3032  * to the beginning of the Tx FIFO.
3033  */
3034
3035 #define E1000_FIFO_HDR                  0x10
3036 #define E1000_82547_PAD_LEN             0x3E0
3037
3038 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3039                                        struct sk_buff *skb)
3040 {
3041         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3042         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3043
3044         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3045
3046         if (adapter->link_duplex != HALF_DUPLEX)
3047                 goto no_fifo_stall_required;
3048
3049         if (atomic_read(&adapter->tx_fifo_stall))
3050                 return 1;
3051
3052         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3053                 atomic_set(&adapter->tx_fifo_stall, 1);
3054                 return 1;
3055         }
3056
3057 no_fifo_stall_required:
3058         adapter->tx_fifo_head += skb_fifo_len;
3059         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3060                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3061         return 0;
3062 }
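/* Worked example of the check above: E1000_FIFO_HDR is 0x10 (16) and
 * E1000_82547_PAD_LEN is 0x3E0 (992).  A 1514-byte frame gives
 * skb_fifo_len = ALIGN(1514 + 16, 16) = 1536, so in half duplex a stall is
 * signalled whenever 1536 - 992 = 544 bytes or less of FIFO space remain.
 */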
3063
3064 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3065 {
3066         struct e1000_adapter *adapter = netdev_priv(netdev);
3067         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3068
3069         netif_stop_queue(netdev);
3070         /* Herbert's original patch had:
3071          *  smp_mb__after_netif_stop_queue();
3072          * but since that doesn't exist yet, just open code it.
3073          */
3074         smp_mb();
3075
3076         /* We need to check again in case another CPU has just
3077          * made room available.
3078          */
3079         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3080                 return -EBUSY;
3081
3082         /* A reprieve! */
3083         netif_start_queue(netdev);
3084         ++adapter->restart_queue;
3085         return 0;
3086 }
3087
3088 static int e1000_maybe_stop_tx(struct net_device *netdev,
3089                                struct e1000_tx_ring *tx_ring, int size)
3090 {
3091         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3092                 return 0;
3093         return __e1000_maybe_stop_tx(netdev, size);
3094 }
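/* Note: the stop path above pairs with the wake path in e1000_clean_tx_irq()
 * below -- the queue is restarted once at least TX_WAKE_THRESHOLD descriptors
 * are unused again, with smp_mb() on both sides ordering the queue-state and
 * ring-index accesses.
 */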
3095
3096 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
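/* TXD_USE_COUNT(S, X) computes ceil(S / 2^X).  Example: a 9000-byte buffer
 * with max_txd_pwr = 12 needs TXD_USE_COUNT(9000, 12) = (9000 + 4095) >> 12
 * = 3 data descriptors.
 */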
3097 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3098                                     struct net_device *netdev)
3099 {
3100         struct e1000_adapter *adapter = netdev_priv(netdev);
3101         struct e1000_hw *hw = &adapter->hw;
3102         struct e1000_tx_ring *tx_ring;
3103         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3104         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3105         unsigned int tx_flags = 0;
3106         unsigned int len = skb_headlen(skb);
3107         unsigned int nr_frags;
3108         unsigned int mss;
3109         int count = 0;
3110         int tso;
3111         unsigned int f;
3112         __be16 protocol = vlan_get_protocol(skb);
3113
3114         /* This goes back to the question of how to logically map a Tx queue
3115          * to a flow.  Right now, performance is impacted slightly negatively
3116          * if using multiple Tx queues.  If the stack breaks away from a
3117          * single qdisc implementation, we can look at this again.
3118          */
3119         tx_ring = adapter->tx_ring;
3120
3121         /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3122          * packets may get corrupted during padding by HW.
3123          * To work around this issue, pad all small packets manually.
3124          */
3125         if (eth_skb_pad(skb))
3126                 return NETDEV_TX_OK;
3127
3128         mss = skb_shinfo(skb)->gso_size;
3129         /* The controller does a simple calculation to
3130          * make sure there is enough room in the FIFO before
3131          * initiating the DMA for each buffer.  The calculation
3132          * requires ceil(buffer len / mss) <= 4, so cap the max
3133          * buffer len at 4 * mss to make sure we don't overrun
3134          * the FIFO if mss drops.
3135          */
3136         if (mss) {
3137                 u8 hdr_len;
3138                 max_per_txd = min(mss << 2, max_per_txd);
3139                 max_txd_pwr = fls(max_per_txd) - 1;
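                /* Illustrative example: mss = 536 caps max_per_txd at
                 * min(536 << 2, 4096) = 2144 and max_txd_pwr at
                 * fls(2144) - 1 = 11, so the descriptor-count estimate below
                 * conservatively assumes 2048-byte chunks.
                 */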
3140
3141                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3142                 if (skb->data_len && hdr_len == len) {
3143                         switch (hw->mac_type) {
3144                         case e1000_82544: {
3145                                 unsigned int pull_size;
3146
3147                                 /* Make sure we have room to chop off 4 bytes,
3148                                  * and that the end alignment will work out to
3149                                  * this hardware's requirements.
3150                                  * NOTE: this is a TSO-only workaround;
3151                                  * if the end byte alignment is not correct,
3152                                  * it moves us into the next dword.
3153                                  */
3154                                 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3155                                     & 4)
3156                                         break;
3157                                 pull_size = min((unsigned int)4, skb->data_len);
3158                                 if (!__pskb_pull_tail(skb, pull_size)) {
3159                                         e_err(drv, "__pskb_pull_tail "
3160                                               "failed.\n");
3161                                         dev_kfree_skb_any(skb);
3162                                         return NETDEV_TX_OK;
3163                                 }
3164                                 len = skb_headlen(skb);
3165                                 break;
3166                         }
3167                         default:
3168                                 /* do nothing */
3169                                 break;
3170                         }
3171                 }
3172         }
3173
3174         /* reserve a descriptor for the offload context */
3175         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3176                 count++;
3177         count++;
3178
3179         /* Controller Erratum workaround */
3180         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3181                 count++;
3182
3183         count += TXD_USE_COUNT(len, max_txd_pwr);
3184
3185         if (adapter->pcix_82544)
3186                 count++;
3187
3188         /* Workaround for errata 10; it applies to all controllers
3189          * in PCI-X mode, so add one more descriptor to the count.
3190          */
3191         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3192                         (len > 2015)))
3193                 count++;
3194
3195         nr_frags = skb_shinfo(skb)->nr_frags;
3196         for (f = 0; f < nr_frags; f++)
3197                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3198                                        max_txd_pwr);
3199         if (adapter->pcix_82544)
3200                 count += nr_frags;
3201
3202         /* need: count + 2 desc gap to keep tail from touching
3203          * head, otherwise try next time
3204          */
3205         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3206                 return NETDEV_TX_BUSY;
3207
3208         if (unlikely((hw->mac_type == e1000_82547) &&
3209                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3210                 netif_stop_queue(netdev);
3211                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3212                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3213                 return NETDEV_TX_BUSY;
3214         }
3215
3216         if (skb_vlan_tag_present(skb)) {
3217                 tx_flags |= E1000_TX_FLAGS_VLAN;
3218                 tx_flags |= (skb_vlan_tag_get(skb) <<
3219                              E1000_TX_FLAGS_VLAN_SHIFT);
3220         }
3221
3222         first = tx_ring->next_to_use;
3223
3224         tso = e1000_tso(adapter, tx_ring, skb, protocol);
3225         if (tso < 0) {
3226                 dev_kfree_skb_any(skb);
3227                 return NETDEV_TX_OK;
3228         }
3229
3230         if (likely(tso)) {
3231                 if (likely(hw->mac_type != e1000_82544))
3232                         tx_ring->last_tx_tso = true;
3233                 tx_flags |= E1000_TX_FLAGS_TSO;
3234         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3235                 tx_flags |= E1000_TX_FLAGS_CSUM;
3236
3237         if (protocol == htons(ETH_P_IP))
3238                 tx_flags |= E1000_TX_FLAGS_IPV4;
3239
3240         if (unlikely(skb->no_fcs))
3241                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3242
3243         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3244                              nr_frags, mss);
3245
3246         if (count) {
3247                 /* The number of descriptors needed is higher than in other
3248                  * Intel drivers due to several workarounds.  The breakdown:
3249                  * Data descriptors: MAX_SKB_FRAGS + 1
3250                  * Context descriptor: 1
3251                  * Keep head from touching tail: 2
3252                  * Workarounds: 3
3253                  */
3254                 int desc_needed = MAX_SKB_FRAGS + 7;
3255
3256                 netdev_sent_queue(netdev, skb->len);
3257                 skb_tx_timestamp(skb);
3258
3259                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3260
3261                 /* 82544 potentially requires twice as many data descriptors
3262                  * in order to guarantee buffers don't end on evenly-aligned
3263                  * dwords
3264                  */
3265                 if (adapter->pcix_82544)
3266                         desc_needed += MAX_SKB_FRAGS + 1;
3267
3268                 /* Make sure there is space in the ring for the next send. */
3269                 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3270
3271                 if (!netdev_xmit_more() ||
3272                     netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3273                         writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3274                 }
3275         } else {
3276                 dev_kfree_skb_any(skb);
3277                 tx_ring->buffer_info[first].time_stamp = 0;
3278                 tx_ring->next_to_use = first;
3279         }
3280
3281         return NETDEV_TX_OK;
3282 }
3283
3284 #define NUM_REGS 38 /* 1 based count */
3285 static void e1000_regdump(struct e1000_adapter *adapter)
3286 {
3287         struct e1000_hw *hw = &adapter->hw;
3288         u32 regs[NUM_REGS];
3289         u32 *regs_buff = regs;
3290         int i = 0;
3291
3292         static const char * const reg_name[] = {
3293                 "CTRL",  "STATUS",
3294                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3295                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3296                 "TIDV", "TXDCTL", "TADV", "TARC0",
3297                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3298                 "TXDCTL1", "TARC1",
3299                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3300                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3301                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3302         };
3303
3304         regs_buff[0]  = er32(CTRL);
3305         regs_buff[1]  = er32(STATUS);
3306
3307         regs_buff[2]  = er32(RCTL);
3308         regs_buff[3]  = er32(RDLEN);
3309         regs_buff[4]  = er32(RDH);
3310         regs_buff[5]  = er32(RDT);
3311         regs_buff[6]  = er32(RDTR);
3312
3313         regs_buff[7]  = er32(TCTL);
3314         regs_buff[8]  = er32(TDBAL);
3315         regs_buff[9]  = er32(TDBAH);
3316         regs_buff[10] = er32(TDLEN);
3317         regs_buff[11] = er32(TDH);
3318         regs_buff[12] = er32(TDT);
3319         regs_buff[13] = er32(TIDV);
3320         regs_buff[14] = er32(TXDCTL);
3321         regs_buff[15] = er32(TADV);
3322         regs_buff[16] = er32(TARC0);
3323
3324         regs_buff[17] = er32(TDBAL1);
3325         regs_buff[18] = er32(TDBAH1);
3326         regs_buff[19] = er32(TDLEN1);
3327         regs_buff[20] = er32(TDH1);
3328         regs_buff[21] = er32(TDT1);
3329         regs_buff[22] = er32(TXDCTL1);
3330         regs_buff[23] = er32(TARC1);
3331         regs_buff[24] = er32(CTRL_EXT);
3332         regs_buff[25] = er32(ERT);
3333         regs_buff[26] = er32(RDBAL0);
3334         regs_buff[27] = er32(RDBAH0);
3335         regs_buff[28] = er32(TDFH);
3336         regs_buff[29] = er32(TDFT);
3337         regs_buff[30] = er32(TDFHS);
3338         regs_buff[31] = er32(TDFTS);
3339         regs_buff[32] = er32(TDFPC);
3340         regs_buff[33] = er32(RDFH);
3341         regs_buff[34] = er32(RDFT);
3342         regs_buff[35] = er32(RDFHS);
3343         regs_buff[36] = er32(RDFTS);
3344         regs_buff[37] = er32(RDFPC);
3345
3346         pr_info("Register dump\n");
3347         for (i = 0; i < NUM_REGS; i++)
3348                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3349 }
3350
3351 /*
3352  * e1000_dump: Print registers, tx ring and rx ring
3353  */
3354 static void e1000_dump(struct e1000_adapter *adapter)
3355 {
3356         /* this code doesn't handle multiple rings */
3357         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3358         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3359         int i;
3360
3361         if (!netif_msg_hw(adapter))
3362                 return;
3363
3364         /* Print Registers */
3365         e1000_regdump(adapter);
3366
3367         /* transmit dump */
3368         pr_info("TX Desc ring0 dump\n");
3369
3370         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3371          *
3372          * Legacy Transmit Descriptor
3373          *   +--------------------------------------------------------------+
3374          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3375          *   +--------------------------------------------------------------+
3376          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3377          *   +--------------------------------------------------------------+
3378          *   63       48 47        36 35    32 31     24 23    16 15        0
3379          *
3380          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3381          *   63      48 47    40 39       32 31             16 15    8 7      0
3382          *   +----------------------------------------------------------------+
3383          * 0 |  TUCSE  | TUCSO  |   TUCSS   |     IPCSE       | IPCSO | IPCSS |
3384          *   +----------------------------------------------------------------+
3385          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3386          *   +----------------------------------------------------------------+
3387          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3388          *
3389          * Extended Data Descriptor (DTYP=0x1)
3390          *   +----------------------------------------------------------------+
3391          * 0 |                     Buffer Address [63:0]                      |
3392          *   +----------------------------------------------------------------+
3393          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3394          *   +----------------------------------------------------------------+
3395          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3396          */
3397         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3398         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3399
3400         if (!netif_msg_tx_done(adapter))
3401                 goto rx_ring_summary;
3402
3403         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3404                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3405                 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3406                 struct my_u { __le64 a; __le64 b; };
3407                 struct my_u *u = (struct my_u *)tx_desc;
3408                 const char *type;
3409
3410                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3411                         type = "NTC/U";
3412                 else if (i == tx_ring->next_to_use)
3413                         type = "NTU";
3414                 else if (i == tx_ring->next_to_clean)
3415                         type = "NTC";
3416                 else
3417                         type = "";
3418
3419                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3420                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3421                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3422                         (u64)buffer_info->dma, buffer_info->length,
3423                         buffer_info->next_to_watch,
3424                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3425         }
3426
3427 rx_ring_summary:
3428         /* receive dump */
3429         pr_info("\nRX Desc ring dump\n");
3430
3431         /* Legacy Receive Descriptor Format
3432          *
3433          * +-----------------------------------------------------+
3434          * |                Buffer Address [63:0]                |
3435          * +-----------------------------------------------------+
3436          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3437          * +-----------------------------------------------------+
3438          * 63       48 47    40 39      32 31         16 15      0
3439          */
3440         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3441
3442         if (!netif_msg_rx_status(adapter))
3443                 goto exit;
3444
3445         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3446                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3447                 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3448                 struct my_u { __le64 a; __le64 b; };
3449                 struct my_u *u = (struct my_u *)rx_desc;
3450                 const char *type;
3451
3452                 if (i == rx_ring->next_to_use)
3453                         type = "NTU";
3454                 else if (i == rx_ring->next_to_clean)
3455                         type = "NTC";
3456                 else
3457                         type = "";
3458
3459                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3460                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3461                         (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3462         } /* for */
3463
3464         /* dump the descriptor caches */
3465         /* rx */
3466         pr_info("Rx descriptor cache in 64bit format\n");
3467         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3468                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3469                         i,
3470                         readl(adapter->hw.hw_addr + i+4),
3471                         readl(adapter->hw.hw_addr + i),
3472                         readl(adapter->hw.hw_addr + i+12),
3473                         readl(adapter->hw.hw_addr + i+8));
3474         }
3475         /* tx */
3476         pr_info("Tx descriptor cache in 64bit format\n");
3477         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3478                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3479                         i,
3480                         readl(adapter->hw.hw_addr + i+4),
3481                         readl(adapter->hw.hw_addr + i),
3482                         readl(adapter->hw.hw_addr + i+12),
3483                         readl(adapter->hw.hw_addr + i+8));
3484         }
3485 exit:
3486         return;
3487 }
3488
3489 /**
3490  * e1000_tx_timeout - Respond to a Tx Hang
3491  * @netdev: network interface device structure
3492  * @txqueue: number of the Tx queue that hung (unused)
3493  **/
3494 static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3495 {
3496         struct e1000_adapter *adapter = netdev_priv(netdev);
3497
3498         /* Do the reset outside of interrupt context */
3499         adapter->tx_timeout_count++;
3500         schedule_work(&adapter->reset_task);
3501 }
3502
3503 static void e1000_reset_task(struct work_struct *work)
3504 {
3505         struct e1000_adapter *adapter =
3506                 container_of(work, struct e1000_adapter, reset_task);
3507
3508         e_err(drv, "Reset adapter\n");
3509         e1000_reinit_locked(adapter);
3510 }
3511
3512 /**
3513  * e1000_change_mtu - Change the Maximum Transfer Unit
3514  * @netdev: network interface device structure
3515  * @new_mtu: new value for maximum frame size
3516  *
3517  * Returns 0 on success, negative on failure
3518  **/
3519 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3520 {
3521         struct e1000_adapter *adapter = netdev_priv(netdev);
3522         struct e1000_hw *hw = &adapter->hw;
3523         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
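        /* e.g. the default MTU of 1500 gives max_frame = 1500 + 14 + 4 = 1518
         * bytes (Ethernet header + payload + FCS).
         */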
3524
3525         /* Adapter-specific max frame size limits. */
3526         switch (hw->mac_type) {
3527         case e1000_undefined ... e1000_82542_rev2_1:
3528                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3529                         e_err(probe, "Jumbo Frames not supported.\n");
3530                         return -EINVAL;
3531                 }
3532                 break;
3533         default:
3534                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3535                 break;
3536         }
3537
3538         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3539                 msleep(1);
3540         /* e1000_down has a dependency on max_frame_size */
3541         hw->max_frame_size = max_frame;
3542         if (netif_running(netdev)) {
3543                 /* prevent buffers from being reallocated */
3544                 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3545                 e1000_down(adapter);
3546         }
3547
3548         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3549          * means we reserve 2 more; this pushes us to allocate from the next
3550          * larger slab size,
3551          * i.e. RXBUFFER_2048 --> size-4096 slab.
3552          * However, with the new *_jumbo_rx* routines, jumbo receives will use
3553          * fragmented skbs.
3554          */
3555
3556         if (max_frame <= E1000_RXBUFFER_2048)
3557                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3558         else
3559 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3560                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3561 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3562                 adapter->rx_buffer_len = PAGE_SIZE;
3563 #endif
3564
3565         /* adjust allocation if LPE protects us, and we aren't using SBP */
3566         if (!hw->tbi_compatibility_on &&
3567             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3568              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3569                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3570
3571         netdev_dbg(netdev, "changing MTU from %d to %d\n",
3572                    netdev->mtu, new_mtu);
3573         netdev->mtu = new_mtu;
3574
3575         if (netif_running(netdev))
3576                 e1000_up(adapter);
3577         else
3578                 e1000_reset(adapter);
3579
3580         clear_bit(__E1000_RESETTING, &adapter->flags);
3581
3582         return 0;
3583 }
3584
3585 /**
3586  * e1000_update_stats - Update the board statistics counters
3587  * @adapter: board private structure
3588  **/
3589 void e1000_update_stats(struct e1000_adapter *adapter)
3590 {
3591         struct net_device *netdev = adapter->netdev;
3592         struct e1000_hw *hw = &adapter->hw;
3593         struct pci_dev *pdev = adapter->pdev;
3594         unsigned long flags;
3595         u16 phy_tmp;
3596
3597 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3598
3599         /* Prevent stats update while adapter is being reset, or if the pci
3600          * connection is down.
3601          */
3602         if (adapter->link_speed == 0)
3603                 return;
3604         if (pci_channel_offline(pdev))
3605                 return;
3606
3607         spin_lock_irqsave(&adapter->stats_lock, flags);
3608
3609         /* these counters are modified from e1000_tbi_adjust_stats,
3610          * called from the interrupt context, so they must only
3611          * be written while holding adapter->stats_lock
3612          */
3613
3614         adapter->stats.crcerrs += er32(CRCERRS);
3615         adapter->stats.gprc += er32(GPRC);
3616         adapter->stats.gorcl += er32(GORCL);
3617         adapter->stats.gorch += er32(GORCH);
3618         adapter->stats.bprc += er32(BPRC);
3619         adapter->stats.mprc += er32(MPRC);
3620         adapter->stats.roc += er32(ROC);
3621
3622         adapter->stats.prc64 += er32(PRC64);
3623         adapter->stats.prc127 += er32(PRC127);
3624         adapter->stats.prc255 += er32(PRC255);
3625         adapter->stats.prc511 += er32(PRC511);
3626         adapter->stats.prc1023 += er32(PRC1023);
3627         adapter->stats.prc1522 += er32(PRC1522);
3628
3629         adapter->stats.symerrs += er32(SYMERRS);
3630         adapter->stats.mpc += er32(MPC);
3631         adapter->stats.scc += er32(SCC);
3632         adapter->stats.ecol += er32(ECOL);
3633         adapter->stats.mcc += er32(MCC);
3634         adapter->stats.latecol += er32(LATECOL);
3635         adapter->stats.dc += er32(DC);
3636         adapter->stats.sec += er32(SEC);
3637         adapter->stats.rlec += er32(RLEC);
3638         adapter->stats.xonrxc += er32(XONRXC);
3639         adapter->stats.xontxc += er32(XONTXC);
3640         adapter->stats.xoffrxc += er32(XOFFRXC);
3641         adapter->stats.xofftxc += er32(XOFFTXC);
3642         adapter->stats.fcruc += er32(FCRUC);
3643         adapter->stats.gptc += er32(GPTC);
3644         adapter->stats.gotcl += er32(GOTCL);
3645         adapter->stats.gotch += er32(GOTCH);
3646         adapter->stats.rnbc += er32(RNBC);
3647         adapter->stats.ruc += er32(RUC);
3648         adapter->stats.rfc += er32(RFC);
3649         adapter->stats.rjc += er32(RJC);
3650         adapter->stats.torl += er32(TORL);
3651         adapter->stats.torh += er32(TORH);
3652         adapter->stats.totl += er32(TOTL);
3653         adapter->stats.toth += er32(TOTH);
3654         adapter->stats.tpr += er32(TPR);
3655
3656         adapter->stats.ptc64 += er32(PTC64);
3657         adapter->stats.ptc127 += er32(PTC127);
3658         adapter->stats.ptc255 += er32(PTC255);
3659         adapter->stats.ptc511 += er32(PTC511);
3660         adapter->stats.ptc1023 += er32(PTC1023);
3661         adapter->stats.ptc1522 += er32(PTC1522);
3662
3663         adapter->stats.mptc += er32(MPTC);
3664         adapter->stats.bptc += er32(BPTC);
3665
3666         /* used for adaptive IFS */
3667
3668         hw->tx_packet_delta = er32(TPT);
3669         adapter->stats.tpt += hw->tx_packet_delta;
3670         hw->collision_delta = er32(COLC);
3671         adapter->stats.colc += hw->collision_delta;
3672
3673         if (hw->mac_type >= e1000_82543) {
3674                 adapter->stats.algnerrc += er32(ALGNERRC);
3675                 adapter->stats.rxerrc += er32(RXERRC);
3676                 adapter->stats.tncrs += er32(TNCRS);
3677                 adapter->stats.cexterr += er32(CEXTERR);
3678                 adapter->stats.tsctc += er32(TSCTC);
3679                 adapter->stats.tsctfc += er32(TSCTFC);
3680         }
3681
3682         /* Fill out the OS statistics structure */
3683         netdev->stats.multicast = adapter->stats.mprc;
3684         netdev->stats.collisions = adapter->stats.colc;
3685
3686         /* Rx Errors */
3687
3688         /* RLEC on some newer hardware can be incorrect so build
3689          * our own version based on RUC and ROC
3690          */
3691         netdev->stats.rx_errors = adapter->stats.rxerrc +
3692                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3693                 adapter->stats.ruc + adapter->stats.roc +
3694                 adapter->stats.cexterr;
3695         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3696         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3697         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3698         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3699         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3700
3701         /* Tx Errors */
3702         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3703         netdev->stats.tx_errors = adapter->stats.txerrc;
3704         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3705         netdev->stats.tx_window_errors = adapter->stats.latecol;
3706         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3707         if (hw->bad_tx_carr_stats_fd &&
3708             adapter->link_duplex == FULL_DUPLEX) {
3709                 netdev->stats.tx_carrier_errors = 0;
3710                 adapter->stats.tncrs = 0;
3711         }
3712
3713         /* Tx Dropped needs to be maintained elsewhere */
3714
3715         /* Phy Stats */
3716         if (hw->media_type == e1000_media_type_copper) {
3717                 if ((adapter->link_speed == SPEED_1000) &&
3718                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3719                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3720                         adapter->phy_stats.idle_errors += phy_tmp;
3721                 }
3722
3723                 if ((hw->mac_type <= e1000_82546) &&
3724                    (hw->phy_type == e1000_phy_m88) &&
3725                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3726                         adapter->phy_stats.receive_errors += phy_tmp;
3727         }
3728
3729         /* Management Stats */
3730         if (hw->has_smbus) {
3731                 adapter->stats.mgptc += er32(MGTPTC);
3732                 adapter->stats.mgprc += er32(MGTPRC);
3733                 adapter->stats.mgpdc += er32(MGTPDC);
3734         }
3735
3736         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3737 }
3738
3739 /**
3740  * e1000_intr - Interrupt Handler
3741  * @irq: interrupt number
3742  * @data: pointer to a network interface device structure
3743  **/
3744 static irqreturn_t e1000_intr(int irq, void *data)
3745 {
3746         struct net_device *netdev = data;
3747         struct e1000_adapter *adapter = netdev_priv(netdev);
3748         struct e1000_hw *hw = &adapter->hw;
3749         u32 icr = er32(ICR);
3750
3751         if (unlikely(!icr))
3752                 return IRQ_NONE;  /* Not our interrupt */
3753
3754         /* We might have caused the interrupt, but the above
3755          * read cleared it; and in case the driver is down,
3756          * there is nothing to do, so return handled.
3757          */
3758         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3759                 return IRQ_HANDLED;
3760
3761         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3762                 hw->get_link_status = 1;
3763                 /* guard against interrupt when we're going down */
3764                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3765                         schedule_delayed_work(&adapter->watchdog_task, 1);
3766         }
3767
3768         /* disable interrupts, without the synchronize_irq bit */
3769         ew32(IMC, ~0);
3770         E1000_WRITE_FLUSH();
3771
3772         if (likely(napi_schedule_prep(&adapter->napi))) {
3773                 adapter->total_tx_bytes = 0;
3774                 adapter->total_tx_packets = 0;
3775                 adapter->total_rx_bytes = 0;
3776                 adapter->total_rx_packets = 0;
3777                 __napi_schedule(&adapter->napi);
3778         } else {
3779                 /* this really should not happen! if it does it is basically a
3780                  * bug, but not a hard error, so enable ints and continue
3781                  */
3782                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3783                         e1000_irq_enable(adapter);
3784         }
3785
3786         return IRQ_HANDLED;
3787 }
3788
3789 /**
3790  * e1000_clean - NAPI Rx polling callback
3791  * @napi: napi struct containing references to driver info
3792  * @budget: budget given to driver for receive packets
3793  **/
3794 static int e1000_clean(struct napi_struct *napi, int budget)
3795 {
3796         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3797                                                      napi);
3798         int tx_clean_complete = 0, work_done = 0;
3799
3800         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3801
3802         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3803
3804         if (!tx_clean_complete || work_done == budget)
3805                 return budget;
3806
3807         /* Exit the polling mode, but don't re-enable interrupts if stack might
3808          * poll us due to busy-polling
3809          */
3810         if (likely(napi_complete_done(napi, work_done))) {
3811                 if (likely(adapter->itr_setting & 3))
3812                         e1000_set_itr(adapter);
3813                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3814                         e1000_irq_enable(adapter);
3815         }
3816
3817         return work_done;
3818 }
3819
3820 /**
3821  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3822  * @adapter: board private structure
3823  * @tx_ring: ring to clean
3824  **/
3825 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3826                                struct e1000_tx_ring *tx_ring)
3827 {
3828         struct e1000_hw *hw = &adapter->hw;
3829         struct net_device *netdev = adapter->netdev;
3830         struct e1000_tx_desc *tx_desc, *eop_desc;
3831         struct e1000_tx_buffer *buffer_info;
3832         unsigned int i, eop;
3833         unsigned int count = 0;
3834         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3835         unsigned int bytes_compl = 0, pkts_compl = 0;
3836
3837         i = tx_ring->next_to_clean;
3838         eop = tx_ring->buffer_info[i].next_to_watch;
3839         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3840
3841         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3842                (count < tx_ring->count)) {
3843                 bool cleaned = false;
3844                 dma_rmb();      /* read buffer_info after eop_desc */
3845                 for ( ; !cleaned; count++) {
3846                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3847                         buffer_info = &tx_ring->buffer_info[i];
3848                         cleaned = (i == eop);
3849
3850                         if (cleaned) {
3851                                 total_tx_packets += buffer_info->segs;
3852                                 total_tx_bytes += buffer_info->bytecount;
3853                                 if (buffer_info->skb) {
3854                                         bytes_compl += buffer_info->skb->len;
3855                                         pkts_compl++;
3856                                 }
3857
3858                         }
3859                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3860                         tx_desc->upper.data = 0;
3861
3862                         if (unlikely(++i == tx_ring->count))
3863                                 i = 0;
3864                 }
3865
3866                 eop = tx_ring->buffer_info[i].next_to_watch;
3867                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3868         }
3869
3870         /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3871          * which will reuse the cleaned buffers.
3872          */
3873         smp_store_release(&tx_ring->next_to_clean, i);
3874
3875         netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3876
3877 #define TX_WAKE_THRESHOLD 32
3878         if (unlikely(count && netif_carrier_ok(netdev) &&
3879                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3880                 /* Make sure that anybody stopping the queue after this
3881                  * sees the new next_to_clean.
3882                  */
3883                 smp_mb();
3884
3885                 if (netif_queue_stopped(netdev) &&
3886                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3887                         netif_wake_queue(netdev);
3888                         ++adapter->restart_queue;
3889                 }
3890         }
3891
3892         if (adapter->detect_tx_hung) {
3893                 /* Detect a transmit hang in hardware; this serializes the
3894                  * check with the clearing of time_stamp and movement of i
3895                  */
3896                 adapter->detect_tx_hung = false;
3897                 if (tx_ring->buffer_info[eop].time_stamp &&
3898                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3899                                (adapter->tx_timeout_factor * HZ)) &&
3900                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3901
3902                         /* detected Tx unit hang */
3903                         e_err(drv, "Detected Tx Unit Hang\n"
3904                               "  Tx Queue             <%lu>\n"
3905                               "  TDH                  <%x>\n"
3906                               "  TDT                  <%x>\n"
3907                               "  next_to_use          <%x>\n"
3908                               "  next_to_clean        <%x>\n"
3909                               "buffer_info[next_to_clean]\n"
3910                               "  time_stamp           <%lx>\n"
3911                               "  next_to_watch        <%x>\n"
3912                               "  jiffies              <%lx>\n"
3913                               "  next_to_watch.status <%x>\n",
3914                                 (unsigned long)(tx_ring - adapter->tx_ring),
3915                                 readl(hw->hw_addr + tx_ring->tdh),
3916                                 readl(hw->hw_addr + tx_ring->tdt),
3917                                 tx_ring->next_to_use,
3918                                 tx_ring->next_to_clean,
3919                                 tx_ring->buffer_info[eop].time_stamp,
3920                                 eop,
3921                                 jiffies,
3922                                 eop_desc->upper.fields.status);
3923                         e1000_dump(adapter);
3924                         netif_stop_queue(netdev);
3925                 }
3926         }
3927         adapter->total_tx_bytes += total_tx_bytes;
3928         adapter->total_tx_packets += total_tx_packets;
3929         netdev->stats.tx_bytes += total_tx_bytes;
3930         netdev->stats.tx_packets += total_tx_packets;
3931         return count < tx_ring->count;
3932 }
3933
3934 /**
3935  * e1000_rx_checksum - Receive Checksum Offload for 82543
3936  * @adapter:     board private structure
3937  * @status_err:  receive descriptor status and error fields
3938  * @csum:        receive descriptor csum field
3939  * @skb:         socket buffer with received data
3940  **/
3941 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3942                               u32 csum, struct sk_buff *skb)
3943 {
3944         struct e1000_hw *hw = &adapter->hw;
3945         u16 status = (u16)status_err;
3946         u8 errors = (u8)(status_err >> 24);
3947
3948         skb_checksum_none_assert(skb);
3949
3950         /* 82543 or newer only */
3951         if (unlikely(hw->mac_type < e1000_82543))
3952                 return;
3953         /* Ignore Checksum bit is set */
3954         if (unlikely(status & E1000_RXD_STAT_IXSM))
3955                 return;
3956         /* TCP/UDP checksum error bit is set */
3957         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3958                 /* let the stack verify checksum errors */
3959                 adapter->hw_csum_err++;
3960                 return;
3961         }
3962         /* TCP/UDP Checksum has not been calculated */
3963         if (!(status & E1000_RXD_STAT_TCPCS))
3964                 return;
3965
3966         /* It must be a TCP or UDP packet with a valid checksum */
3967         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3968                 /* TCP checksum is good */
3969                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3970         }
3971         adapter->hw_csum_good++;
3972 }
3973
3974 /**
3975  * e1000_consume_page - helper function for jumbo Rx path
3976  * @bi: software descriptor shadow data
3977  * @skb: skb being modified
3978  * @length: length of data being added
3979  **/
3980 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3981                                u16 length)
3982 {
3983         bi->rxbuf.page = NULL;
3984         skb->len += length;
3985         skb->data_len += length;
3986         skb->truesize += PAGE_SIZE;
3987 }
3988
3989 /**
3990  * e1000_receive_skb - helper function to handle rx indications
3991  * @adapter: board private structure
3992  * @status: descriptor status field as written by hardware
3993  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3994  * @skb: pointer to sk_buff to be indicated to stack
3995  */
3996 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3997                               __le16 vlan, struct sk_buff *skb)
3998 {
3999         skb->protocol = eth_type_trans(skb, adapter->netdev);
4000
4001         if (status & E1000_RXD_STAT_VP) {
4002                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4003
4004                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4005         }
4006         napi_gro_receive(&adapter->napi, skb);
4007 }
4008
4009 /**
4010  * e1000_tbi_adjust_stats
4011  * @hw: Struct containing variables accessed by shared code
4012  * @stats: point to stats struct
4013  * @frame_len: The length of the frame in question
4014  * @mac_addr: The Ethernet destination address of the frame in question
4015  *
4016  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4017  */
4018 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4019                                    struct e1000_hw_stats *stats,
4020                                    u32 frame_len, const u8 *mac_addr)
4021 {
4022         u64 carry_bit;
4023
4024         /* First adjust the frame length. */
4025         frame_len--;
4026         /* We need to adjust the statistics counters, since the hardware
4027          * counters overcount this packet as a CRC error and undercount
4028          * the packet as a good packet
4029          */
4030         /* This packet should not be counted as a CRC error. */
4031         stats->crcerrs--;
4032         /* This packet does count as a Good Packet Received. */
4033         stats->gprc++;
4034
4035         /* Adjust the Good Octets received counters */
4036         carry_bit = 0x80000000 & stats->gorcl;
4037         stats->gorcl += frame_len;
4038         /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4039          * Received Count) was one before the addition,
4040          * AND it is zero after, then we lost the carry out,
4041          * need to add one to Gorch (Good Octets Received Count High).
4042          * This could be simplified if all environments supported
4043          * 64-bit integers.
4044          */
4045         if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4046                 stats->gorch++;
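        /* Example: if the low 32 bits of gorcl were 0xFFFFFF00 (bit 31 set)
         * before the addition and frame_len is 0x200, bit 31 is clear
         * afterwards, so the carry out of the low 32 bits is added back into
         * gorch here.
         */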
4047         /* Is this a broadcast or multicast?  Check broadcast first,
4048          * since the test for a multicast frame will test positive on
4049          * a broadcast frame.
4050          */
4051         if (is_broadcast_ether_addr(mac_addr))
4052                 stats->bprc++;
4053         else if (is_multicast_ether_addr(mac_addr))
4054                 stats->mprc++;
4055
4056         if (frame_len == hw->max_frame_size) {
4057                 /* In this case, the hardware has overcounted the number of
4058                  * oversize frames.
4059                  */
4060                 if (stats->roc > 0)
4061                         stats->roc--;
4062         }
4063
4064         /* Adjust the bin counters when the extra byte put the frame in the
4065          * wrong bin. Remember that the frame_len was adjusted above.
4066          */
4067         if (frame_len == 64) {
4068                 stats->prc64++;
4069                 stats->prc127--;
4070         } else if (frame_len == 127) {
4071                 stats->prc127++;
4072                 stats->prc255--;
4073         } else if (frame_len == 255) {
4074                 stats->prc255++;
4075                 stats->prc511--;
4076         } else if (frame_len == 511) {
4077                 stats->prc511++;
4078                 stats->prc1023--;
4079         } else if (frame_len == 1023) {
4080                 stats->prc1023++;
4081                 stats->prc1522--;
4082         } else if (frame_len == 1522) {
4083                 stats->prc1522++;
4084         }
4085 }
4086
4087 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4088                                     u8 status, u8 errors,
4089                                     u32 length, const u8 *data)
4090 {
4091         struct e1000_hw *hw = &adapter->hw;
4092         u8 last_byte = *(data + length - 1);
4093
4094         if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4095                 unsigned long irq_flags;
4096
4097                 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4098                 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4099                 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4100
4101                 return true;
4102         }
4103
4104         return false;
4105 }
4106
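/* Small helper: allocate an skb in NAPI context and account for failures
 * in the alloc_rx_buff_failed counter.
 */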
4107 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4108                                           unsigned int bufsz)
4109 {
4110         struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4111
4112         if (unlikely(!skb))
4113                 adapter->alloc_rx_buff_failed++;
4114         return skb;
4115 }
4116
4117 /**
4118  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4119  * @adapter: board private structure
4120  * @rx_ring: ring to clean
4121  * @work_done: amount of napi work completed this call
4122  * @work_to_do: max amount of work allowed for this call to do
4123  *
4124  * The return value indicates whether actual cleaning was done; there
4125  * is no guarantee that everything was cleaned.
4126  */
4127 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4128                                      struct e1000_rx_ring *rx_ring,
4129                                      int *work_done, int work_to_do)
4130 {
4131         struct net_device *netdev = adapter->netdev;
4132         struct pci_dev *pdev = adapter->pdev;
4133         struct e1000_rx_desc *rx_desc, *next_rxd;
4134         struct e1000_rx_buffer *buffer_info, *next_buffer;
4135         u32 length;
4136         unsigned int i;
4137         int cleaned_count = 0;
4138         bool cleaned = false;
4139         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4140
4141         i = rx_ring->next_to_clean;
4142         rx_desc = E1000_RX_DESC(*rx_ring, i);
4143         buffer_info = &rx_ring->buffer_info[i];
4144
4145         while (rx_desc->status & E1000_RXD_STAT_DD) {
4146                 struct sk_buff *skb;
4147                 u8 status;
4148
4149                 if (*work_done >= work_to_do)
4150                         break;
4151                 (*work_done)++;
4152                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4153
4154                 status = rx_desc->status;
4155
4156                 if (++i == rx_ring->count)
4157                         i = 0;
4158
4159                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4160                 prefetch(next_rxd);
4161
4162                 next_buffer = &rx_ring->buffer_info[i];
4163
4164                 cleaned = true;
4165                 cleaned_count++;
4166                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4167                                adapter->rx_buffer_len, DMA_FROM_DEVICE);
4168                 buffer_info->dma = 0;
4169
4170                 length = le16_to_cpu(rx_desc->length);
4171
4172                 /* errors is only valid for DD + EOP descriptors */
4173                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4174                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4175                         u8 *mapped = page_address(buffer_info->rxbuf.page);
4176
4177                         if (e1000_tbi_should_accept(adapter, status,
4178                                                     rx_desc->errors,
4179                                                     length, mapped)) {
4180                                 length--;
4181                         } else if (netdev->features & NETIF_F_RXALL) {
4182                                 goto process_skb;
4183                         } else {
4184                                 /* an error means any chain goes out the window
4185                                  * too
4186                                  */
4187                                 dev_kfree_skb(rx_ring->rx_skb_top);
4188                                 rx_ring->rx_skb_top = NULL;
4189                                 goto next_desc;
4190                         }
4191                 }
4192
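                /* rxtop aliases the ring's rx_skb_top pointer, which carries a
                 * partially assembled multi-descriptor (jumbo) frame across
                 * loop iterations until the EOP descriptor is reached.
                 */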
4193 #define rxtop rx_ring->rx_skb_top
4194 process_skb:
4195                 if (!(status & E1000_RXD_STAT_EOP)) {
4196                         /* this descriptor is only the beginning (or middle) */
4197                         if (!rxtop) {
4198                                 /* this is the beginning of a chain */
4199                                 rxtop = napi_get_frags(&adapter->napi);
4200                                 if (!rxtop)
4201                                         break;
4202
4203                                 skb_fill_page_desc(rxtop, 0,
4204                                                    buffer_info->rxbuf.page,
4205                                                    0, length);
4206                         } else {
4207                                 /* this is the middle of a chain */
4208                                 skb_fill_page_desc(rxtop,
4209                                     skb_shinfo(rxtop)->nr_frags,
4210                                     buffer_info->rxbuf.page, 0, length);
4211                         }
4212                         e1000_consume_page(buffer_info, rxtop, length);
4213                         goto next_desc;
4214                 } else {
4215                         if (rxtop) {
4216                                 /* end of the chain */
4217                                 skb_fill_page_desc(rxtop,
4218                                     skb_shinfo(rxtop)->nr_frags,
4219                                     buffer_info->rxbuf.page, 0, length);
4220                                 skb = rxtop;
4221                                 rxtop = NULL;
4222                                 e1000_consume_page(buffer_info, skb, length);
4223                         } else {
4224                                 struct page *p;
4225                                 /* no chain, got EOP; this buf is the whole packet.
4226                                  * Use copybreak to save a put_page/alloc_page.
4227                                  */
4228                                 p = buffer_info->rxbuf.page;
4229                                 if (length <= copybreak) {
4230                                         u8 *vaddr;
4231
4232                                         if (likely(!(netdev->features & NETIF_F_RXFCS)))
4233                                                 length -= 4;
4234                                         skb = e1000_alloc_rx_skb(adapter,
4235                                                                  length);
4236                                         if (!skb)
4237                                                 break;
4238
4239                                         vaddr = kmap_atomic(p);
4240                                         memcpy(skb_tail_pointer(skb), vaddr,
4241                                                length);
4242                                         kunmap_atomic(vaddr);
4243                                         /* re-use the page, so don't erase
4244                                          * buffer_info->rxbuf.page
4245                                          */
4246                                         skb_put(skb, length);
4247                                         e1000_rx_checksum(adapter,
4248                                                           status | rx_desc->errors << 24,
4249                                                           le16_to_cpu(rx_desc->csum), skb);
4250
4251                                         total_rx_bytes += skb->len;
4252                                         total_rx_packets++;
4253
4254                                         e1000_receive_skb(adapter, status,
4255                                                           rx_desc->special, skb);
4256                                         goto next_desc;
4257                                 } else {
4258                                         skb = napi_get_frags(&adapter->napi);
4259                                         if (!skb) {
4260                                                 adapter->alloc_rx_buff_failed++;
4261                                                 break;
4262                                         }
4263                                         skb_fill_page_desc(skb, 0, p, 0,
4264                                                            length);
4265                                         e1000_consume_page(buffer_info, skb,
4266                                                            length);
4267                                 }
4268                         }
4269                 }
4270
4271                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4272                 e1000_rx_checksum(adapter,
4273                                   (u32)(status) |
4274                                   ((u32)(rx_desc->errors) << 24),
4275                                   le16_to_cpu(rx_desc->csum), skb);
4276
4277                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4278                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4279                         pskb_trim(skb, skb->len - 4);
4280                 total_rx_packets++;
4281
4282                 if (status & E1000_RXD_STAT_VP) {
4283                         __le16 vlan = rx_desc->special;
4284                         u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4285
4286                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4287                 }
4288
4289                 napi_gro_frags(&adapter->napi);
4290
4291 next_desc:
4292                 rx_desc->status = 0;
4293
4294                 /* return some buffers to hardware, one at a time is too slow */
4295                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4296                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4297                         cleaned_count = 0;
4298                 }
4299
4300                 /* use prefetched values */
4301                 rx_desc = next_rxd;
4302                 buffer_info = next_buffer;
4303         }
4304         rx_ring->next_to_clean = i;
4305
4306         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4307         if (cleaned_count)
4308                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4309
4310         adapter->total_rx_packets += total_rx_packets;
4311         adapter->total_rx_bytes += total_rx_bytes;
4312         netdev->stats.rx_bytes += total_rx_bytes;
4313         netdev->stats.rx_packets += total_rx_packets;
4314         return cleaned;
4315 }
4316
4317 /* this should improve performance for small packets with large amounts
4318  * of reassembly being done in the stack
4319  */
4320 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4321                                        struct e1000_rx_buffer *buffer_info,
4322                                        u32 length, const void *data)
4323 {
4324         struct sk_buff *skb;
4325
4326         if (length > copybreak)
4327                 return NULL;
4328
4329         skb = e1000_alloc_rx_skb(adapter, length);
4330         if (!skb)
4331                 return NULL;
4332
4333         dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4334                                 length, DMA_FROM_DEVICE);
4335
4336         skb_put_data(skb, data, length);
4337
4338         return skb;
4339 }
4340
4341 /**
4342  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4343  * @adapter: board private structure
4344  * @rx_ring: ring to clean
4345  * @work_done: amount of napi work completed this call
4346  * @work_to_do: max amount of work allowed for this call to do
4347  */
4348 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4349                                struct e1000_rx_ring *rx_ring,
4350                                int *work_done, int work_to_do)
4351 {
4352         struct net_device *netdev = adapter->netdev;
4353         struct pci_dev *pdev = adapter->pdev;
4354         struct e1000_rx_desc *rx_desc, *next_rxd;
4355         struct e1000_rx_buffer *buffer_info, *next_buffer;
4356         u32 length;
4357         unsigned int i;
4358         int cleaned_count = 0;
4359         bool cleaned = false;
4360         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4361
4362         i = rx_ring->next_to_clean;
4363         rx_desc = E1000_RX_DESC(*rx_ring, i);
4364         buffer_info = &rx_ring->buffer_info[i];
4365
4366         while (rx_desc->status & E1000_RXD_STAT_DD) {
4367                 struct sk_buff *skb;
4368                 u8 *data;
4369                 u8 status;
4370
4371                 if (*work_done >= work_to_do)
4372                         break;
4373                 (*work_done)++;
4374                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4375
4376                 status = rx_desc->status;
4377                 length = le16_to_cpu(rx_desc->length);
4378
4379                 data = buffer_info->rxbuf.data;
4380                 prefetch(data);
4381                 skb = e1000_copybreak(adapter, buffer_info, length, data);
4382                 if (!skb) {
4383                         unsigned int frag_len = e1000_frag_len(adapter);
4384
4385                         skb = build_skb(data - E1000_HEADROOM, frag_len);
4386                         if (!skb) {
4387                                 adapter->alloc_rx_buff_failed++;
4388                                 break;
4389                         }
4390
4391                         skb_reserve(skb, E1000_HEADROOM);
4392                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4393                                          adapter->rx_buffer_len,
4394                                          DMA_FROM_DEVICE);
4395                         buffer_info->dma = 0;
4396                         buffer_info->rxbuf.data = NULL;
4397                 }
4398
4399                 if (++i == rx_ring->count)
4400                         i = 0;
4401
4402                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4403                 prefetch(next_rxd);
4404
4405                 next_buffer = &rx_ring->buffer_info[i];
4406
4407                 cleaned = true;
4408                 cleaned_count++;
4409
4410                 /* !EOP means multiple descriptors were used to store a single
4411                  * packet; if that's the case we need to toss it.  In fact, we
4412                  * need to toss every packet with the EOP bit clear and the next
4413                  * frame that _does_ have the EOP bit set, as it is by
4414                  * definition only a frame fragment
4415                  */
4416                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4417                         adapter->discarding = true;
4418
4419                 if (adapter->discarding) {
4420                         /* All receives must fit into a single buffer */
4421                         netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4422                         dev_kfree_skb(skb);
4423                         if (status & E1000_RXD_STAT_EOP)
4424                                 adapter->discarding = false;
4425                         goto next_desc;
4426                 }
4427
4428                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4429                         if (e1000_tbi_should_accept(adapter, status,
4430                                                     rx_desc->errors,
4431                                                     length, data)) {
4432                                 length--;
4433                         } else if (netdev->features & NETIF_F_RXALL) {
4434                                 goto process_skb;
4435                         } else {
4436                                 dev_kfree_skb(skb);
4437                                 goto next_desc;
4438                         }
4439                 }
4440
4441 process_skb:
4442                 total_rx_bytes += (length - 4); /* don't count FCS */
4443                 total_rx_packets++;
4444
4445                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4446                         /* adjust length to remove the Ethernet CRC; this must
4447                          * be done after the TBI_ACCEPT workaround above
4448                          */
4449                         length -= 4;
4450
4451                 if (buffer_info->rxbuf.data == NULL)
4452                         skb_put(skb, length);
4453                 else /* copybreak skb */
4454                         skb_trim(skb, length);
4455
4456                 /* Receive Checksum Offload */
4457                 e1000_rx_checksum(adapter,
4458                                   (u32)(status) |
4459                                   ((u32)(rx_desc->errors) << 24),
4460                                   le16_to_cpu(rx_desc->csum), skb);
4461
4462                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4463
4464 next_desc:
4465                 rx_desc->status = 0;
4466
4467                 /* return some buffers to hardware, one at a time is too slow */
4468                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4469                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4470                         cleaned_count = 0;
4471                 }
4472
4473                 /* use prefetched values */
4474                 rx_desc = next_rxd;
4475                 buffer_info = next_buffer;
4476         }
4477         rx_ring->next_to_clean = i;
4478
4479         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4480         if (cleaned_count)
4481                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4482
4483         adapter->total_rx_packets += total_rx_packets;
4484         adapter->total_rx_bytes += total_rx_bytes;
4485         netdev->stats.rx_bytes += total_rx_bytes;
4486         netdev->stats.rx_packets += total_rx_packets;
4487         return cleaned;
4488 }
4489
4490 /**
4491  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4492  * @adapter: address of board private structure
4493  * @rx_ring: pointer to receive ring structure
4494  * @cleaned_count: number of buffers to allocate this pass
4495  **/
4496 static void
4497 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4498                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4499 {
4500         struct pci_dev *pdev = adapter->pdev;
4501         struct e1000_rx_desc *rx_desc;
4502         struct e1000_rx_buffer *buffer_info;
4503         unsigned int i;
4504
4505         i = rx_ring->next_to_use;
4506         buffer_info = &rx_ring->buffer_info[i];
4507
4508         while (cleaned_count--) {
4509                 /* allocate a new page if necessary */
4510                 if (!buffer_info->rxbuf.page) {
4511                         buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4512                         if (unlikely(!buffer_info->rxbuf.page)) {
4513                                 adapter->alloc_rx_buff_failed++;
4514                                 break;
4515                         }
4516                 }
4517
4518                 if (!buffer_info->dma) {
4519                         buffer_info->dma = dma_map_page(&pdev->dev,
4520                                                         buffer_info->rxbuf.page, 0,
4521                                                         adapter->rx_buffer_len,
4522                                                         DMA_FROM_DEVICE);
4523                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4524                                 put_page(buffer_info->rxbuf.page);
4525                                 buffer_info->rxbuf.page = NULL;
4526                                 buffer_info->dma = 0;
4527                                 adapter->alloc_rx_buff_failed++;
4528                                 break;
4529                         }
4530                 }
4531
4532                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4533                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4534
4535                 if (unlikely(++i == rx_ring->count))
4536                         i = 0;
4537                 buffer_info = &rx_ring->buffer_info[i];
4538         }
4539
4540         if (likely(rx_ring->next_to_use != i)) {
4541                 rx_ring->next_to_use = i;
4542                 if (unlikely(i-- == 0))
4543                         i = (rx_ring->count - 1);
4544
4545                 /* Force memory writes to complete before letting h/w
4546                  * know there are new descriptors to fetch.  (Only
4547                  * applicable for weak-ordered memory model archs,
4548                  * such as IA-64).
4549                  */
4550                 dma_wmb();
4551                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4552         }
4553 }
4554
4555 /**
4556  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4557  * @adapter: address of board private structure
4558  * @rx_ring: pointer to ring struct
4559  * @cleaned_count: number of new Rx buffers to try to allocate
4560  **/
4561 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4562                                    struct e1000_rx_ring *rx_ring,
4563                                    int cleaned_count)
4564 {
4565         struct e1000_hw *hw = &adapter->hw;
4566         struct pci_dev *pdev = adapter->pdev;
4567         struct e1000_rx_desc *rx_desc;
4568         struct e1000_rx_buffer *buffer_info;
4569         unsigned int i;
4570         unsigned int bufsz = adapter->rx_buffer_len;
4571
4572         i = rx_ring->next_to_use;
4573         buffer_info = &rx_ring->buffer_info[i];
4574
4575         while (cleaned_count--) {
4576                 void *data;
4577
4578                 if (buffer_info->rxbuf.data)
4579                         goto skip;
4580
4581                 data = e1000_alloc_frag(adapter);
4582                 if (!data) {
4583                         /* Better luck next round */
4584                         adapter->alloc_rx_buff_failed++;
4585                         break;
4586                 }
4587
4588                 /* Fix for errata 23, can't cross 64kB boundary */
4589                 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4590                         void *olddata = data;
4591                         e_err(rx_err, "skb align check failed: %u bytes at "
4592                               "%p\n", bufsz, data);
4593                         /* Try again, without freeing the previous */
4594                         data = e1000_alloc_frag(adapter);
4595                         /* Failed allocation, critical failure */
4596                         if (!data) {
4597                                 skb_free_frag(olddata);
4598                                 adapter->alloc_rx_buff_failed++;
4599                                 break;
4600                         }
4601
4602                         if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4603                                 /* give up */
4604                                 skb_free_frag(data);
4605                                 skb_free_frag(olddata);
4606                                 adapter->alloc_rx_buff_failed++;
4607                                 break;
4608                         }
4609
4610                         /* Use new allocation */
4611                         skb_free_frag(olddata);
4612                 }
4613                 buffer_info->dma = dma_map_single(&pdev->dev,
4614                                                   data,
4615                                                   adapter->rx_buffer_len,
4616                                                   DMA_FROM_DEVICE);
4617                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4618                         skb_free_frag(data);
4619                         buffer_info->dma = 0;
4620                         adapter->alloc_rx_buff_failed++;
4621                         break;
4622                 }
4623
4624                 /* XXX if it was allocated cleanly it will never map to a
4625                  * boundary crossing
4626                  */
4627
4628                 /* Fix for errata 23, can't cross 64kB boundary */
4629                 if (!e1000_check_64k_bound(adapter,
4630                                         (void *)(unsigned long)buffer_info->dma,
4631                                         adapter->rx_buffer_len)) {
4632                         e_err(rx_err, "dma align check failed: %u bytes at "
4633                               "%p\n", adapter->rx_buffer_len,
4634                               (void *)(unsigned long)buffer_info->dma);
4635
4636                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4637                                          adapter->rx_buffer_len,
4638                                          DMA_FROM_DEVICE);
4639
4640                         skb_free_frag(data);
4641                         buffer_info->rxbuf.data = NULL;
4642                         buffer_info->dma = 0;
4643
4644                         adapter->alloc_rx_buff_failed++;
4645                         break;
4646                 }
4647                 buffer_info->rxbuf.data = data;
4648  skip:
4649                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4650                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4651
4652                 if (unlikely(++i == rx_ring->count))
4653                         i = 0;
4654                 buffer_info = &rx_ring->buffer_info[i];
4655         }
4656
4657         if (likely(rx_ring->next_to_use != i)) {
4658                 rx_ring->next_to_use = i;
4659                 if (unlikely(i-- == 0))
4660                         i = (rx_ring->count - 1);
4661
4662                 /* Force memory writes to complete before letting h/w
4663                  * know there are new descriptors to fetch.  (Only
4664                  * applicable for weak-ordered memory model archs,
4665                  * such as IA-64).
4666                  */
4667                 dma_wmb();
4668                 writel(i, hw->hw_addr + rx_ring->rdt);
4669         }
4670 }
4671
4672 /**
4673  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4674  * @adapter: address of board private structure
4675  **/
4676 static void e1000_smartspeed(struct e1000_adapter *adapter)
4677 {
4678         struct e1000_hw *hw = &adapter->hw;
4679         u16 phy_status;
4680         u16 phy_ctrl;
4681
4682         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4683            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4684                 return;
4685
4686         if (adapter->smartspeed == 0) {
4687                 /* If the Master/Slave config fault is asserted on two
4688                  * consecutive reads, assume the fault is persistent
4689                  */
4690                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4691                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4692                         return;
4693                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4694                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4695                         return;
4696                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4697                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4698                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4699                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4700                                             phy_ctrl);
4701                         adapter->smartspeed++;
4702                         if (!e1000_phy_setup_autoneg(hw) &&
4703                            !e1000_read_phy_reg(hw, PHY_CTRL,
4704                                                &phy_ctrl)) {
4705                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4706                                              MII_CR_RESTART_AUTO_NEG);
4707                                 e1000_write_phy_reg(hw, PHY_CTRL,
4708                                                     phy_ctrl);
4709                         }
4710                 }
4711                 return;
4712         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4713                 /* If still no link, perhaps using 2/3 pair cable */
4714                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4715                 phy_ctrl |= CR_1000T_MS_ENABLE;
4716                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4717                 if (!e1000_phy_setup_autoneg(hw) &&
4718                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4719                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4720                                      MII_CR_RESTART_AUTO_NEG);
4721                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4722                 }
4723         }
4724         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4725         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4726                 adapter->smartspeed = 0;
4727 }
4728
4729 /**
4730  * e1000_ioctl - handle ioctl calls
4731  * @netdev: pointer to our netdev
4732  * @ifr: pointer to interface request structure
4733  * @cmd: ioctl command number
4734  **/
4735 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4736 {
4737         switch (cmd) {
4738         case SIOCGMIIPHY:
4739         case SIOCGMIIREG:
4740         case SIOCSMIIREG:
4741                 return e1000_mii_ioctl(netdev, ifr, cmd);
4742         default:
4743                 return -EOPNOTSUPP;
4744         }
4745 }
4746
4747 /**
4748  * e1000_mii_ioctl - handle MII ioctl calls
4749  * @netdev: pointer to our netdev
4750  * @ifr: pointer to interface request structure
4751  * @cmd: ioctl command number
4752  **/
4753 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4754                            int cmd)
4755 {
4756         struct e1000_adapter *adapter = netdev_priv(netdev);
4757         struct e1000_hw *hw = &adapter->hw;
4758         struct mii_ioctl_data *data = if_mii(ifr);
4759         int retval;
4760         u16 mii_reg;
4761         unsigned long flags;
4762
4763         if (hw->media_type != e1000_media_type_copper)
4764                 return -EOPNOTSUPP;
4765
4766         switch (cmd) {
4767         case SIOCGMIIPHY:
4768                 data->phy_id = hw->phy_addr;
4769                 break;
4770         case SIOCGMIIREG:
4771                 spin_lock_irqsave(&adapter->stats_lock, flags);
4772                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4773                                    &data->val_out)) {
4774                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4775                         return -EIO;
4776                 }
4777                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4778                 break;
4779         case SIOCSMIIREG:
4780                 if (data->reg_num & ~(0x1F))
4781                         return -EFAULT;
4782                 mii_reg = data->val_in;
4783                 spin_lock_irqsave(&adapter->stats_lock, flags);
4784                 if (e1000_write_phy_reg(hw, data->reg_num,
4785                                         mii_reg)) {
4786                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4787                         return -EIO;
4788                 }
4789                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4790                 if (hw->media_type == e1000_media_type_copper) {
4791                         switch (data->reg_num) {
4792                         case PHY_CTRL:
4793                                 if (mii_reg & MII_CR_POWER_DOWN)
4794                                         break;
4795                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4796                                         hw->autoneg = 1;
4797                                         hw->autoneg_advertised = 0x2F;
4798                                 } else {
4799                                         u32 speed;
4800                                         if (mii_reg & 0x40)
4801                                                 speed = SPEED_1000;
4802                                         else if (mii_reg & 0x2000)
4803                                                 speed = SPEED_100;
4804                                         else
4805                                                 speed = SPEED_10;
4806                                         retval = e1000_set_spd_dplx(
4807                                                 adapter, speed,
4808                                                 ((mii_reg & 0x100)
4809                                                  ? DUPLEX_FULL :
4810                                                  DUPLEX_HALF));
4811                                         if (retval)
4812                                                 return retval;
4813                                 }
4814                                 if (netif_running(adapter->netdev))
4815                                         e1000_reinit_locked(adapter);
4816                                 else
4817                                         e1000_reset(adapter);
4818                                 break;
4819                         case M88E1000_PHY_SPEC_CTRL:
4820                         case M88E1000_EXT_PHY_SPEC_CTRL:
4821                                 if (e1000_phy_reset(hw))
4822                                         return -EIO;
4823                                 break;
4824                         }
4825                 } else {
4826                         switch (data->reg_num) {
4827                         case PHY_CTRL:
4828                                 if (mii_reg & MII_CR_POWER_DOWN)
4829                                         break;
4830                                 if (netif_running(adapter->netdev))
4831                                         e1000_reinit_locked(adapter);
4832                                 else
4833                                         e1000_reset(adapter);
4834                                 break;
4835                         }
4836                 }
4837                 break;
4838         default:
4839                 return -EOPNOTSUPP;
4840         }
4841         return E1000_SUCCESS;
4842 }
4843
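/* Thin wrappers used by the shared e1000_hw code to reach PCI/PCI-X
 * features (MWI, maximum memory read byte count) and port I/O through the
 * adapter's pci_dev.
 */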
4844 void e1000_pci_set_mwi(struct e1000_hw *hw)
4845 {
4846         struct e1000_adapter *adapter = hw->back;
4847         int ret_val = pci_set_mwi(adapter->pdev);
4848
4849         if (ret_val)
4850                 e_err(probe, "Error in setting MWI\n");
4851 }
4852
4853 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4854 {
4855         struct e1000_adapter *adapter = hw->back;
4856
4857         pci_clear_mwi(adapter->pdev);
4858 }
4859
4860 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4861 {
4862         struct e1000_adapter *adapter = hw->back;
4863         return pcix_get_mmrbc(adapter->pdev);
4864 }
4865
4866 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4867 {
4868         struct e1000_adapter *adapter = hw->back;
4869         pcix_set_mmrbc(adapter->pdev, mmrbc);
4870 }
4871
4872 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4873 {
4874         outl(value, port);
4875 }
4876
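/* Return true if at least one VLAN is currently configured on the port */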
4877 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4878 {
4879         u16 vid;
4880
4881         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4882                 return true;
4883         return false;
4884 }
4885
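/* Program the VME bit in CTRL so hardware VLAN tag stripping follows the
 * NETIF_F_HW_VLAN_CTAG_RX feature flag.
 */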
4886 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4887                               netdev_features_t features)
4888 {
4889         struct e1000_hw *hw = &adapter->hw;
4890         u32 ctrl;
4891
4892         ctrl = er32(CTRL);
4893         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4894                 /* enable VLAN tag insert/strip */
4895                 ctrl |= E1000_CTRL_VME;
4896         } else {
4897                 /* disable VLAN tag insert/strip */
4898                 ctrl &= ~E1000_CTRL_VME;
4899         }
4900         ew32(CTRL, ctrl);
4901 }
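
/* Turn hardware VLAN filtering (RCTL.VFE) on or off, keeping tag stripping
 * in sync with the current netdev features.  Interrupts are masked while
 * the receive control register is rewritten.
 */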
4902 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4903                                      bool filter_on)
4904 {
4905         struct e1000_hw *hw = &adapter->hw;
4906         u32 rctl;
4907
4908         if (!test_bit(__E1000_DOWN, &adapter->flags))
4909                 e1000_irq_disable(adapter);
4910
4911         __e1000_vlan_mode(adapter, adapter->netdev->features);
4912         if (filter_on) {
4913                 /* enable VLAN receive filtering */
4914                 rctl = er32(RCTL);
4915                 rctl &= ~E1000_RCTL_CFIEN;
4916                 if (!(adapter->netdev->flags & IFF_PROMISC))
4917                         rctl |= E1000_RCTL_VFE;
4918                 ew32(RCTL, rctl);
4919                 e1000_update_mng_vlan(adapter);
4920         } else {
4921                 /* disable VLAN receive filtering */
4922                 rctl = er32(RCTL);
4923                 rctl &= ~E1000_RCTL_VFE;
4924                 ew32(RCTL, rctl);
4925         }
4926
4927         if (!test_bit(__E1000_DOWN, &adapter->flags))
4928                 e1000_irq_enable(adapter);
4929 }
4930
4931 static void e1000_vlan_mode(struct net_device *netdev,
4932                             netdev_features_t features)
4933 {
4934         struct e1000_adapter *adapter = netdev_priv(netdev);
4935
4936         if (!test_bit(__E1000_DOWN, &adapter->flags))
4937                 e1000_irq_disable(adapter);
4938
4939         __e1000_vlan_mode(adapter, features);
4940
4941         if (!test_bit(__E1000_DOWN, &adapter->flags))
4942                 e1000_irq_enable(adapter);
4943 }
4944
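/**
 * e1000_vlan_rx_add_vid - add a VLAN ID to the hardware filter table
 * @netdev: pointer to our netdev
 * @proto: VLAN ethertype (802.1Q)
 * @vid: VLAN ID to add
 *
 * The 4096 possible VIDs map onto 128 32-bit VFTA registers: the register
 * index is (vid >> 5) & 0x7F and the bit within it is (vid & 0x1F), so for
 * example VID 100 sets bit 4 of VFTA[3].  Hardware filtering is switched
 * on when the first VLAN is added.
 **/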
4945 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4946                                  __be16 proto, u16 vid)
4947 {
4948         struct e1000_adapter *adapter = netdev_priv(netdev);
4949         struct e1000_hw *hw = &adapter->hw;
4950         u32 vfta, index;
4951
4952         if ((hw->mng_cookie.status &
4953              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4954             (vid == adapter->mng_vlan_id))
4955                 return 0;
4956
4957         if (!e1000_vlan_used(adapter))
4958                 e1000_vlan_filter_on_off(adapter, true);
4959
4960         /* add VID to filter table */
4961         index = (vid >> 5) & 0x7F;
4962         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4963         vfta |= (1 << (vid & 0x1F));
4964         e1000_write_vfta(hw, index, vfta);
4965
4966         set_bit(vid, adapter->active_vlans);
4967
4968         return 0;
4969 }
4970
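/* Remove a VLAN ID from the VFTA hardware filter table and from the
 * software active_vlans bitmap; hardware filtering is switched off again
 * once the last VLAN is gone.
 */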
4971 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4972                                   __be16 proto, u16 vid)
4973 {
4974         struct e1000_adapter *adapter = netdev_priv(netdev);
4975         struct e1000_hw *hw = &adapter->hw;
4976         u32 vfta, index;
4977
4978         if (!test_bit(__E1000_DOWN, &adapter->flags))
4979                 e1000_irq_disable(adapter);
4980         if (!test_bit(__E1000_DOWN, &adapter->flags))
4981                 e1000_irq_enable(adapter);
4982
4983         /* remove VID from filter table */
4984         index = (vid >> 5) & 0x7F;
4985         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4986         vfta &= ~(1 << (vid & 0x1F));
4987         e1000_write_vfta(hw, index, vfta);
4988
4989         clear_bit(vid, adapter->active_vlans);
4990
4991         if (!e1000_vlan_used(adapter))
4992                 e1000_vlan_filter_on_off(adapter, false);
4993
4994         return 0;
4995 }
4996
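/* Re-program the VLAN filter table from the software active_vlans bitmap,
 * e.g. after the hardware has been reset and the VFTA registers cleared.
 */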
4997 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4998 {
4999         u16 vid;
5000
5001         if (!e1000_vlan_used(adapter))
5002                 return;
5003
5004         e1000_vlan_filter_on_off(adapter, true);
5005         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5006                 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5007 }
5008
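/**
 * e1000_set_spd_dplx - force a specific speed/duplex setting
 * @adapter: board private structure
 * @spd: requested speed (SPEED_10/100/1000)
 * @dplx: requested duplex (DUPLEX_HALF or DUPLEX_FULL)
 *
 * 1000 Mbps full duplex is handled by restricting autonegotiation to that
 * single advertisement; the other valid combinations disable autoneg and
 * force the speed/duplex.  Returns 0 on success or -EINVAL for an
 * unsupported combination.
 **/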
5009 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5010 {
5011         struct e1000_hw *hw = &adapter->hw;
5012
5013         hw->autoneg = 0;
5014
5015         /* Make sure dplx is at most 1 bit and lsb of speed is not set
5016          * for the switch() below to work
5017          */
5018         if ((spd & 1) || (dplx & ~1))
5019                 goto err_inval;
5020
5021         /* Fiber NICs only allow 1000 Mbps Full duplex */
5022         if ((hw->media_type == e1000_media_type_fiber) &&
5023             spd != SPEED_1000 &&
5024             dplx != DUPLEX_FULL)
5025                 goto err_inval;
5026
5027         switch (spd + dplx) {
5028         case SPEED_10 + DUPLEX_HALF:
5029                 hw->forced_speed_duplex = e1000_10_half;
5030                 break;
5031         case SPEED_10 + DUPLEX_FULL:
5032                 hw->forced_speed_duplex = e1000_10_full;
5033                 break;
5034         case SPEED_100 + DUPLEX_HALF:
5035                 hw->forced_speed_duplex = e1000_100_half;
5036                 break;
5037         case SPEED_100 + DUPLEX_FULL:
5038                 hw->forced_speed_duplex = e1000_100_full;
5039                 break;
5040         case SPEED_1000 + DUPLEX_FULL:
5041                 hw->autoneg = 1;
5042                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5043                 break;
5044         case SPEED_1000 + DUPLEX_HALF: /* not supported */
5045         default:
5046                 goto err_inval;
5047         }
5048
5049         /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5050         hw->mdix = AUTO_ALL_MODES;
5051
5052         return 0;
5053
5054 err_inval:
5055         e_err(probe, "Unsupported Speed/Duplex configuration\n");
5056         return -EINVAL;
5057 }
5058
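/* Common suspend/shutdown path: stop the interface, program the Wake on
 * LAN filter registers (WUFC/WUC) when wake-up is requested, and report
 * through *enable_wake whether the platform should arm PME for this
 * device.
 */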
5059 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5060 {
5061         struct net_device *netdev = pci_get_drvdata(pdev);
5062         struct e1000_adapter *adapter = netdev_priv(netdev);
5063         struct e1000_hw *hw = &adapter->hw;
5064         u32 ctrl, ctrl_ext, rctl, status;
5065         u32 wufc = adapter->wol;
5066
5067         netif_device_detach(netdev);
5068
5069         if (netif_running(netdev)) {
5070                 int count = E1000_CHECK_RESET_COUNT;
5071
5072                 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5073                         usleep_range(10000, 20000);
5074
5075                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5076                 e1000_down(adapter);
5077         }
5078
5079         status = er32(STATUS);
5080         if (status & E1000_STATUS_LU)
5081                 wufc &= ~E1000_WUFC_LNKC;
5082
5083         if (wufc) {
5084                 e1000_setup_rctl(adapter);
5085                 e1000_set_rx_mode(netdev);
5086
5087                 rctl = er32(RCTL);
5088
5089                 /* turn on all-multi mode if wake on multicast is enabled */
5090                 if (wufc & E1000_WUFC_MC)
5091                         rctl |= E1000_RCTL_MPE;
5092
5093                 /* enable receives in the hardware */
5094                 ew32(RCTL, rctl | E1000_RCTL_EN);
5095
5096                 if (hw->mac_type >= e1000_82540) {
5097                         ctrl = er32(CTRL);
5098                         /* advertise wake from D3Cold */
5099                         #define E1000_CTRL_ADVD3WUC 0x00100000
5100                         /* phy power management enable */
5101                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5102                         ctrl |= E1000_CTRL_ADVD3WUC |
5103                                 E1000_CTRL_EN_PHY_PWR_MGMT;
5104                         ew32(CTRL, ctrl);
5105                 }
5106
5107                 if (hw->media_type == e1000_media_type_fiber ||
5108                     hw->media_type == e1000_media_type_internal_serdes) {
5109                         /* keep the laser running in D3 */
5110                         ctrl_ext = er32(CTRL_EXT);
5111                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5112                         ew32(CTRL_EXT, ctrl_ext);
5113                 }
5114
5115                 ew32(WUC, E1000_WUC_PME_EN);
5116                 ew32(WUFC, wufc);
5117         } else {
5118                 ew32(WUC, 0);
5119                 ew32(WUFC, 0);
5120         }
5121
5122         e1000_release_manageability(adapter);
5123
5124         *enable_wake = !!wufc;
5125
5126         /* make sure adapter isn't asleep if manageability is enabled */
5127         if (adapter->en_mng_pt)
5128                 *enable_wake = true;
5129
5130         if (netif_running(netdev))
5131                 e1000_free_irq(adapter);
5132
5133         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5134                 pci_disable_device(pdev);
5135
5136         return 0;
5137 }
5138
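/* PM suspend callback: run the common shutdown path and let the PM core
 * know whether wake-up should remain enabled for this device.
 */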
5139 static int __maybe_unused e1000_suspend(struct device *dev)
5140 {
5141         int retval;
5142         struct pci_dev *pdev = to_pci_dev(dev);
5143         bool wake;
5144
5145         retval = __e1000_shutdown(pdev, &wake);
5146         device_set_wakeup_enable(dev, wake);
5147
5148         return retval;
5149 }
5150
5151 static int __maybe_unused e1000_resume(struct device *dev)
5152 {
5153         struct pci_dev *pdev = to_pci_dev(dev);
5154         struct net_device *netdev = pci_get_drvdata(pdev);
5155         struct e1000_adapter *adapter = netdev_priv(netdev);
5156         struct e1000_hw *hw = &adapter->hw;
5157         u32 err;
5158
5159         if (adapter->need_ioport)
5160                 err = pci_enable_device(pdev);
5161         else
5162                 err = pci_enable_device_mem(pdev);
5163         if (err) {
5164                 pr_err("Cannot enable PCI device from suspend\n");
5165                 return err;
5166         }
5167
5168         /* flush memory to make sure state is correct */
5169         smp_mb__before_atomic();
5170         clear_bit(__E1000_DISABLED, &adapter->flags);
5171         pci_set_master(pdev);
5172
5173         pci_enable_wake(pdev, PCI_D3hot, 0);
5174         pci_enable_wake(pdev, PCI_D3cold, 0);
5175
5176         if (netif_running(netdev)) {
5177                 err = e1000_request_irq(adapter);
5178                 if (err)
5179                         return err;
5180         }
5181
5182         e1000_power_up_phy(adapter);
5183         e1000_reset(adapter);
5184         ew32(WUS, ~0);
5185
5186         e1000_init_manageability(adapter);
5187
5188         if (netif_running(netdev))
5189                 e1000_up(adapter);
5190
5191         netif_device_attach(netdev);
5192
5193         return 0;
5194 }
5195
5196 static void e1000_shutdown(struct pci_dev *pdev)
5197 {
5198         bool wake;
5199
5200         __e1000_shutdown(pdev, &wake);
5201
5202         if (system_state == SYSTEM_POWER_OFF) {
5203                 pci_wake_from_d3(pdev, wake);
5204                 pci_set_power_state(pdev, PCI_D3hot);
5205         }
5206 }
5207
5208 #ifdef CONFIG_NET_POLL_CONTROLLER
5209 /* Polling 'interrupt' - used by things like netconsole to send skbs
5210  * without having to re-enable interrupts. It's not called while
5211  * the interrupt routine is executing.
5212  */
5213 static void e1000_netpoll(struct net_device *netdev)
5214 {
5215         struct e1000_adapter *adapter = netdev_priv(netdev);
5216
5217         if (disable_hardirq(adapter->pdev->irq))
5218                 e1000_intr(adapter->pdev->irq, netdev);
5219         enable_irq(adapter->pdev->irq);
5220 }
5221 #endif
5222
5223 /**
5224  * e1000_io_error_detected - called when PCI error is detected
5225  * @pdev: Pointer to PCI device
5226  * @state: The current pci connection state
5227  *
5228  * This function is called after a PCI bus error affecting
5229  * this device has been detected.
5230  */
5231 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5232                                                 pci_channel_state_t state)
5233 {
5234         struct net_device *netdev = pci_get_drvdata(pdev);
5235         struct e1000_adapter *adapter = netdev_priv(netdev);
5236
5237         netif_device_detach(netdev);
5238
5239         if (state == pci_channel_io_perm_failure)
5240                 return PCI_ERS_RESULT_DISCONNECT;
5241
5242         if (netif_running(netdev))
5243                 e1000_down(adapter);
5244
5245         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5246                 pci_disable_device(pdev);
5247
5248         /* Request a slot reset. */
5249         return PCI_ERS_RESULT_NEED_RESET;
5250 }
5251
5252 /**
5253  * e1000_io_slot_reset - called after the pci bus has been reset.
5254  * @pdev: Pointer to PCI device
5255  *
5256  * Restart the card from scratch, as if from a cold-boot. Implementation
5257  * resembles the first-half of the e1000_resume routine.
5258  */
5259 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5260 {
5261         struct net_device *netdev = pci_get_drvdata(pdev);
5262         struct e1000_adapter *adapter = netdev_priv(netdev);
5263         struct e1000_hw *hw = &adapter->hw;
5264         int err;
5265
5266         if (adapter->need_ioport)
5267                 err = pci_enable_device(pdev);
5268         else
5269                 err = pci_enable_device_mem(pdev);
5270         if (err) {
5271                 pr_err("Cannot re-enable PCI device after reset.\n");
5272                 return PCI_ERS_RESULT_DISCONNECT;
5273         }
5274
5275         /* flush memory to make sure state is correct */
5276         smp_mb__before_atomic();
5277         clear_bit(__E1000_DISABLED, &adapter->flags);
5278         pci_set_master(pdev);
5279
5280         pci_enable_wake(pdev, PCI_D3hot, 0);
5281         pci_enable_wake(pdev, PCI_D3cold, 0);
5282
5283         e1000_reset(adapter);
5284         ew32(WUS, ~0);
5285
5286         return PCI_ERS_RESULT_RECOVERED;
5287 }
5288
5289 /**
5290  * e1000_io_resume - called when traffic can start flowing again.
5291  * @pdev: Pointer to PCI device
5292  *
5293  * This callback is called when the error recovery driver tells us that
5294  * it's OK to resume normal operation. Implementation resembles the
5295  * second-half of the e1000_resume routine.
5296  */
5297 static void e1000_io_resume(struct pci_dev *pdev)
5298 {
5299         struct net_device *netdev = pci_get_drvdata(pdev);
5300         struct e1000_adapter *adapter = netdev_priv(netdev);
5301
5302         e1000_init_manageability(adapter);
5303
5304         if (netif_running(netdev)) {
5305                 if (e1000_up(adapter)) {
5306                         pr_info("can't bring device back up after reset\n");
5307                         return;
5308                 }
5309         }
5310
5311         netif_device_attach(netdev);
5312 }
5313
5314 /* e1000_main.c */