/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for e1000 */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "e1000.h"
enum {NETDEV_STATS, E1000_STATS};

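/*
 * Each entry in the gstrings stats table below describes one counter
 * exported via 'ethtool -S': the string shown to the user, whether the
 * value lives in struct net_device or struct e1000_adapter (type), and
 * the size/offset used to fetch it in e1000_get_ethtool_stats().
 */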
struct e1000_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define E1000_STAT(str, m) { \
	.stat_string = str, \
	.type = E1000_STATS, \
	.sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
	.stat_offset = offsetof(struct e1000_adapter, m) }
#define E1000_NETDEV_STAT(str, m) { \
	.stat_string = str, \
	.type = NETDEV_STATS, \
	.sizeof_stat = sizeof(((struct net_device *)0)->m), \
	.stat_offset = offsetof(struct net_device, m) }

static const struct e1000_stats e1000_gstrings_stats[] = {
	E1000_STAT("rx_packets", stats.gprc),
	E1000_STAT("tx_packets", stats.gptc),
	E1000_STAT("rx_bytes", stats.gorc),
	E1000_STAT("tx_bytes", stats.gotc),
	E1000_STAT("rx_broadcast", stats.bprc),
	E1000_STAT("tx_broadcast", stats.bptc),
	E1000_STAT("rx_multicast", stats.mprc),
	E1000_STAT("tx_multicast", stats.mptc),
	E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
	E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
	E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
	E1000_STAT("multicast", stats.mprc),
	E1000_STAT("collisions", stats.colc),
	E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
	E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
	E1000_STAT("rx_crc_errors", stats.crcerrs),
	E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
	E1000_STAT("rx_no_buffer_count", stats.rnbc),
	E1000_STAT("rx_missed_errors", stats.mpc),
	E1000_STAT("tx_aborted_errors", stats.ecol),
	E1000_STAT("tx_carrier_errors", stats.tncrs),
	E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
	E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
	E1000_STAT("tx_window_errors", stats.latecol),
	E1000_STAT("tx_abort_late_coll", stats.latecol),
	E1000_STAT("tx_deferred_ok", stats.dc),
	E1000_STAT("tx_single_coll_ok", stats.scc),
	E1000_STAT("tx_multi_coll_ok", stats.mcc),
	E1000_STAT("tx_timeout_count", tx_timeout_count),
	E1000_STAT("tx_restart_queue", restart_queue),
	E1000_STAT("rx_long_length_errors", stats.roc),
	E1000_STAT("rx_short_length_errors", stats.ruc),
	E1000_STAT("rx_align_errors", stats.algnerrc),
	E1000_STAT("tx_tcp_seg_good", stats.tsctc),
	E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
	E1000_STAT("rx_flow_control_xon", stats.xonrxc),
	E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
	E1000_STAT("tx_flow_control_xon", stats.xontxc),
	E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
	E1000_STAT("rx_long_byte_count", stats.gorc),
	E1000_STAT("rx_csum_offload_good", hw_csum_good),
	E1000_STAT("rx_csum_offload_errors", hw_csum_err),
	E1000_STAT("rx_header_split", rx_hdr_split),
	E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
	E1000_STAT("tx_smbus", stats.mgptc),
	E1000_STAT("rx_smbus", stats.mgprc),
	E1000_STAT("dropped_smbus", stats.mgpdc),
	E1000_STAT("rx_dma_failed", rx_dma_failed),
	E1000_STAT("tx_dma_failed", tx_dma_failed),
};

#define E1000_GLOBAL_STATS_LEN	ARRAY_SIZE(e1000_gstrings_stats)
#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN)
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};

#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)

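/*
 * Note: the test strings above are reported in the same order in which
 * e1000_diag_test() fills in its data[] results (register, EEPROM,
 * interrupt, loopback, link).
 */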
static int e1000_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->phy.media_type == e1000_media_type_copper) {

		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Full |
				   SUPPORTED_Autoneg |
				   SUPPORTED_TP);
		if (hw->phy.type == e1000_phy_ife)
			ecmd->supported &= ~SUPPORTED_1000baseT_Full;
		ecmd->advertising = ADVERTISED_TP;

		if (hw->mac.autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			/* the e1000 autoneg seems to match ethtool nicely */
			ecmd->advertising |= hw->phy.autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy.addr;
		ecmd->transceiver = XCVR_INTERNAL;
	} else {
		ecmd->supported = (SUPPORTED_1000baseT_Full |
				   SUPPORTED_FIBRE |
				   SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_1000baseT_Full |
				     ADVERTISED_FIBRE |
				     ADVERTISED_Autoneg);

		ecmd->port = PORT_FIBRE;
		ecmd->transceiver = XCVR_EXTERNAL;
	}

	ecmd->speed = -1;
	ecmd->duplex = -1;

	if (netif_running(netdev)) {
		if (netif_carrier_ok(netdev)) {
			ecmd->speed = adapter->link_speed;
			ecmd->duplex = adapter->link_duplex - 1;
		}
	} else {
		u32 status = er32(STATUS);
		if (status & E1000_STATUS_LU) {
			if (status & E1000_STATUS_SPEED_1000)
				ecmd->speed = 1000;
			else if (status & E1000_STATUS_SPEED_100)
				ecmd->speed = 100;
			else
				ecmd->speed = 10;

			if (status & E1000_STATUS_FD)
				ecmd->duplex = DUPLEX_FULL;
			else
				ecmd->duplex = DUPLEX_HALF;
		}
	}

	ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
			 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* MDI-X => 2; MDI => 1; Invalid => 0 */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    netif_carrier_ok(netdev))
		ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
						      ETH_TP_MDI;
	else
		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;

	return 0;
}

static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
		e_err("Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		e_err("Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}

static int e1000_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * When SoL/IDER sessions are active, autoneg/speed/duplex
	 * cannot be changed
	 */
	if (e1000_check_reset_block(hw)) {
		e_err("Cannot change link characteristics when SoL/IDER is "
		      "active.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->mac.autoneg = 1;
		if (hw->phy.media_type == e1000_media_type_fiber)
			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
						     ADVERTISED_FIBRE |
						     ADVERTISED_Autoneg;
		else
			hw->phy.autoneg_advertised = ecmd->advertising |
						     ADVERTISED_TP |
						     ADVERTISED_Autoneg;
		ecmd->advertising = hw->phy.autoneg_advertised;
		if (adapter->fc_autoneg)
			hw->fc.requested_mode = e1000_fc_default;
	} else {
		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
			clear_bit(__E1000_RESETTING, &adapter->state);
			return -EINVAL;
		}
	}

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		e1000e_down(adapter);
		e1000e_up(adapter);
	} else {
		e1000e_reset(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->state);
	return 0;
}

static void e1000_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pause->autoneg =
	    (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (hw->fc.current_mode == e1000_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == e1000_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == e1000_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int e1000_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		hw->fc.requested_mode = e1000_fc_default;
		if (netif_running(adapter->netdev)) {
			e1000e_down(adapter);
			e1000e_up(adapter);
		} else {
			e1000e_reset(adapter);
		}
	} else {
		if (pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_full;
		else if (pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_rx_pause;
		else if (!pause->rx_pause && pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_tx_pause;
		else if (!pause->rx_pause && !pause->tx_pause)
			hw->fc.requested_mode = e1000_fc_none;

		hw->fc.current_mode = hw->fc.requested_mode;

		if (hw->phy.media_type == e1000_media_type_fiber) {
			retval = hw->mac.ops.setup_link(hw);
			/* implicit goto out */
		} else {
			retval = e1000e_force_mac_fc(hw);
			if (retval)
				goto out;
			e1000e_set_fc_watermarks(hw);
		}
	}

out:
	clear_bit(__E1000_RESETTING, &adapter->state);
	return retval;
}

static u32 e1000_get_rx_csum(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	return adapter->flags & FLAG_RX_CSUM_ENABLED;
}

static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (data)
		adapter->flags |= FLAG_RX_CSUM_ENABLED;
	else
		adapter->flags &= ~FLAG_RX_CSUM_ENABLED;

	if (netif_running(netdev))
		e1000e_reinit_locked(adapter);
	else
		e1000e_reset(adapter);
	return 0;
}

static u32 e1000_get_tx_csum(struct net_device *netdev)
{
	return (netdev->features & NETIF_F_HW_CSUM) != 0;
}

static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
{
	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;
	return 0;
}

static int e1000_set_tso(struct net_device *netdev, u32 data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (data) {
		netdev->features |= NETIF_F_TSO;
		netdev->features |= NETIF_F_TSO6;
	} else {
		netdev->features &= ~NETIF_F_TSO;
		netdev->features &= ~NETIF_F_TSO6;
	}

	adapter->flags |= FLAG_TSO_FORCE;
	return 0;
}

static u32 e1000_get_msglevel(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void e1000_set_msglevel(struct net_device *netdev, u32 data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32 /* overestimate */
	return E1000_REGS_LEN * sizeof(u32);
}

static void e1000_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;
	u8 revision_id;

	memset(p, 0, E1000_REGS_LEN * sizeof(u32));

	pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id);

	regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device;

	regs_buff[0] = er32(CTRL);
	regs_buff[1] = er32(STATUS);

	regs_buff[2] = er32(RCTL);
	regs_buff[3] = er32(RDLEN);
	regs_buff[4] = er32(RDH);
	regs_buff[5] = er32(RDT);
	regs_buff[6] = er32(RDTR);

	regs_buff[7] = er32(TCTL);
	regs_buff[8] = er32(TDLEN);
	regs_buff[9] = er32(TDH);
	regs_buff[10] = er32(TDT);
	regs_buff[11] = er32(TIDV);

	regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */

	/* ethtool doesn't use anything past this point, so all this
	 * code is likely legacy junk for apps that may or may not
	 * be around. */
	if (hw->phy.type == e1000_phy_m88) {
		e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
		regs_buff[13] = (u32)phy_data; /* cable length */
		regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
		regs_buff[18] = regs_buff[13]; /* cable polarity */
		regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[20] = regs_buff[17]; /* polarity correction */
		/* phy receive errors */
		regs_buff[22] = adapter->phy_stats.receive_errors;
		regs_buff[23] = regs_buff[13]; /* mdix mode */
	}
	regs_buff[21] = 0; /* was idle_errors */
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
	regs_buff[24] = (u32)phy_data; /* phy local receiver status */
	regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
}

static int e1000_get_eeprom_len(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.nvm.word_size * 2;
}

static int e1000_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word;
	int last_word;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc(sizeof(u16) *
			(last_word - first_word + 1), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		ret_val = e1000_read_nvm(hw, first_word,
					 last_word - first_word + 1,
					 eeprom_buff);
	} else {
		for (i = 0; i < last_word - first_word + 1; i++) {
			ret_val = e1000_read_nvm(hw, first_word + i, 1,
						 &eeprom_buff[i]);
			if (ret_val)
				break;
		}
	}

	if (ret_val) {
		/* a read error occurred, throw away the result */
		memset(eeprom_buff, 0xff, sizeof(u16) *
		       (last_word - first_word + 1));
	} else {
		/* Device's eeprom is always little-endian, word addressable */
		for (i = 0; i < last_word - first_word + 1; i++)
			le16_to_cpus(&eeprom_buff[i]);
	}

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int e1000_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len;
	int first_word;
	int last_word;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
		return -EFAULT;

	if (adapter->flags & FLAG_READ_ONLY_NVM)
		return -EINVAL;

	max_len = hw->nvm.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word */
		/* only the second byte of the word is being modified */
		ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]);
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0))
		/* need read/modify/write of last changed EEPROM word */
		/* only the first byte of the word is being modified */
		ret_val = e1000_read_nvm(hw, last_word, 1,
					 &eeprom_buff[last_word - first_word]);

	if (ret_val)
		goto out;

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = e1000_write_nvm(hw, first_word,
				  last_word - first_word + 1, eeprom_buff);

	if (ret_val)
		goto out;

	/*
	 * Update the checksum over the first part of the EEPROM if needed
	 * and flush shadow RAM for applicable controllers
	 */
	if ((first_word <= NVM_CHECKSUM_REG) ||
	    (hw->mac.type == e1000_82583) ||
	    (hw->mac.type == e1000_82574) ||
	    (hw->mac.type == e1000_82573))
		ret_val = e1000e_update_nvm_checksum(hw);

out:
	kfree(eeprom_buff);
	return ret_val;
}

static void e1000_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	char firmware_version[32];

	strncpy(drvinfo->driver, e1000e_driver_name,
		sizeof(drvinfo->driver) - 1);
	strncpy(drvinfo->version, e1000e_driver_version,
		sizeof(drvinfo->version) - 1);

	/*
	 * EEPROM image version # is reported as firmware version # for
	 * PCI-E controllers
	 */
	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
		 (adapter->eeprom_vers & 0xF000) >> 12,
		 (adapter->eeprom_vers & 0x0FF0) >> 4,
		 (adapter->eeprom_vers & 0x000F));

	strncpy(drvinfo->fw_version, firmware_version,
		sizeof(drvinfo->fw_version) - 1);
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info) - 1);
	drvinfo->regdump_len = e1000_get_regs_len(netdev);
	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}

static void e1000_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_ring *rx_ring = adapter->rx_ring;

	ring->rx_max_pending = E1000_MAX_RXD;
	ring->tx_max_pending = E1000_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int e1000_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *tx_ring, *tx_old;
	struct e1000_ring *rx_ring, *rx_old;
	int err;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		msleep(1);

	if (netif_running(adapter->netdev))
		e1000e_down(adapter);

	tx_old = adapter->tx_ring;
	rx_old = adapter->rx_ring;

	err = -ENOMEM;
	tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!tx_ring)
		goto err_alloc_tx;
	/*
	 * use a memcpy to save any previously configured
	 * items like napi structs from having to be
	 * reallocated
	 */
	memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));

	rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
	if (!rx_ring)
		goto err_alloc_rx;
	memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));

	adapter->tx_ring = tx_ring;
	adapter->rx_ring = rx_ring;

	rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
	rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD));
	rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);

	tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
	tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD));
	tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);

	if (netif_running(adapter->netdev)) {
		/* Try to get new resources before deleting old */
		err = e1000e_setup_rx_resources(adapter);
		if (err)
			goto err_setup_rx;
		err = e1000e_setup_tx_resources(adapter);
		if (err)
			goto err_setup_tx;

		/*
		 * restore the old in order to free it,
		 * then add in the new
		 */
		adapter->rx_ring = rx_old;
		adapter->tx_ring = tx_old;
		e1000e_free_rx_resources(adapter);
		e1000e_free_tx_resources(adapter);
		adapter->rx_ring = rx_ring;
		adapter->tx_ring = tx_ring;
		err = e1000e_up(adapter);
		if (err)
			goto err_setup;
	}

	clear_bit(__E1000_RESETTING, &adapter->state);
	return 0;

err_setup_tx:
	e1000e_free_rx_resources(adapter);
err_setup_rx:
	adapter->rx_ring = rx_old;
	adapter->tx_ring = tx_old;
	kfree(rx_ring);
err_alloc_rx:
	kfree(tx_ring);
err_alloc_tx:
	e1000e_up(adapter);
err_setup:
	clear_bit(__E1000_RESETTING, &adapter->state);
	return err;
}

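/*
 * Register diagnostics used by e1000_reg_test(): write each test pattern
 * (masked by 'write') to a register, read it back, and compare against the
 * pattern masked by 'mask'; a mismatch records the failing register in *data.
 */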
static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
			     int reg, int offset, u32 mask, u32 write)
{
	u32 pat, val;
	static const u32 test[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
	for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
		E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
				      (test[pat] & write));
		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
		if (val != (test[pat] & write & mask)) {
			e_err("pattern test reg %04X failed: got 0x%08X "
			      "expected 0x%08X\n", reg + offset, val,
			      (test[pat] & write & mask));
			*data = reg;
			return 1;
		}
	}
	return 0;
}

static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val;
	__ew32(&adapter->hw, reg, write & mask);
	val = __er32(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err("set/check reg %04X test failed: got 0x%08X "
		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
		*data = reg;
		return 1;
	}
	return 0;
}

#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
	do {                                                                   \
		if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
			return 1;                                              \
	} while (0)
#define REG_PATTERN_TEST(reg, mask, write)                                     \
	REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)

#define REG_SET_AND_CHECK(reg, mask, write)                                    \
	do {                                                                   \
		if (reg_set_and_check(adapter, data, reg, mask, write))        \
			return 1;                                              \
	} while (0)

static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &adapter->hw.mac;
	u32 value;
	u32 before;
	u32 after;
	u32 i;
	u32 toggle;

	/*
	 * The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */
	/* there are several bits on newer hardware that are r/w */
	case e1000_80003es2lan:

	before = er32(STATUS);
	value = (er32(STATUS) & toggle);
	ew32(STATUS, toggle);
	after = er32(STATUS) & toggle;
	if (value != after) {
		e_err("failed STATUS register test got: 0x%08X expected: "
		      "0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ew32(STATUS, before);

	if (!(adapter->flags & FLAG_IS_ICH)) {
		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
		REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
	}

	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);

	before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);

	REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
	if (!(adapter->flags & FLAG_IS_ICH))
		REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);

	for (i = 0; i < mac->rar_entry_count; i++)
		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),

	for (i = 0; i < mac->mta_reg_count; i++)
		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;
	return 0;
}

static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
{
	u16 temp;
	u16 checksum = 0;
	u16 i;

	*data = 0;
	/* Read and add up the contents of the EEPROM */
	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
			*data = 1;
			break;
		}
		checksum += temp;
	}

	/* If Checksum is not Correct return error else test passed */
	if ((checksum != (u16) NVM_SUM) && !(*data))
		*data = 2;

	return *data;
}

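/*
 * Minimal interrupt handler used only by the interrupt self-test below:
 * it simply accumulates the interrupt cause bits it observes into
 * adapter->test_icr for e1000_intr_test() to examine.
 */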
static irqreturn_t e1000_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->test_icr |= er32(ICR);

	return IRQ_HANDLED;
}

static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 mask;
	u32 shared_int = 1;
	u32 irq = adapter->pdev->irq;
	int i;
	int ret_val = 0;
	int int_mode = E1000E_INT_MODE_LEGACY;

	*data = 0;

	/* NOTE: we don't test MSI/MSI-X interrupts here, yet */
	if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
		int_mode = adapter->int_mode;
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
		e1000e_set_interrupt_capability(adapter);
	}
	/* Hook up test interrupt handler just for this test */
	if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
			 netdev)) {
		shared_int = 0;
	} else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
		 netdev->name, netdev)) {
		*data = 1;
		ret_val = -1;
		goto out;
	}
	e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	e1e_flush();
	msleep(10);

	/* Test each interrupt */
	for (i = 0; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (adapter->flags & FLAG_IS_ICH) {
			switch (mask) {
			case E1000_ICR_RXSEQ:
				continue;
			case 0x00000100:
				if (adapter->hw.mac.type == e1000_ich8lan ||
				    adapter->hw.mac.type == e1000_ich9lan)
					continue;
				break;
			default:
				break;
			}
		}

		if (!shared_int) {
			/*
			 * Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, mask);
			ew32(ICS, mask);
			e1e_flush();
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;
		ew32(IMS, mask);
		ew32(ICS, mask);
		e1e_flush();
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, ~mask & 0x00007FFF);
			ew32(ICS, ~mask & 0x00007FFF);
			e1e_flush();
			msleep(10);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	e1e_flush();
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

out:
	if (int_mode == E1000E_INT_MODE_MSIX) {
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = int_mode;
		e1000e_set_interrupt_capability(adapter);
	}

	return ret_val;
}

static void e1000_free_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	if (tx_ring->desc && tx_ring->buffer_info) {
		for (i = 0; i < tx_ring->count; i++) {
			if (tx_ring->buffer_info[i].dma)
				dma_unmap_single(&pdev->dev,
					tx_ring->buffer_info[i].dma,
					tx_ring->buffer_info[i].length,
					DMA_TO_DEVICE);
			if (tx_ring->buffer_info[i].skb)
				dev_kfree_skb(tx_ring->buffer_info[i].skb);
		}
	}

	if (rx_ring->desc && rx_ring->buffer_info) {
		for (i = 0; i < rx_ring->count; i++) {
			if (rx_ring->buffer_info[i].dma)
				dma_unmap_single(&pdev->dev,
					rx_ring->buffer_info[i].dma,
					2048, DMA_FROM_DEVICE);
			if (rx_ring->buffer_info[i].skb)
				dev_kfree_skb(rx_ring->buffer_info[i].skb);
		}
	}

	if (tx_ring->desc) {
		dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
				  tx_ring->dma);
		tx_ring->desc = NULL;
	}
	if (rx_ring->desc) {
		dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
				  rx_ring->dma);
		rx_ring->desc = NULL;
	}

	kfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;
	kfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
}

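/*
 * Allocate small, self-contained Tx/Rx descriptor rings (separate from the
 * normal data-path rings) and point the hardware at them; used by the
 * loopback self-test and torn down again by e1000_free_desc_rings().
 */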
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	int i;
	int ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!tx_ring->count)
		tx_ring->count = E1000_DEFAULT_TXD;

	tx_ring->buffer_info = kcalloc(tx_ring->count,
				       sizeof(struct e1000_buffer),
				       GFP_KERNEL);
	if (!(tx_ring->buffer_info)) {
		ret_val = 1;
		goto err_nomem;
	}

	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		ret_val = 2;
		goto err_nomem;
	}
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
	ew32(TDBAH, ((u64) tx_ring->dma >> 32));
	ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
	ew32(TDH, 0);
	ew32(TDT, 0);
	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
	     E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	for (i = 0; i < tx_ring->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct sk_buff *skb;
		unsigned int skb_size = 1024;

		skb = alloc_skb(skb_size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, skb_size);
		tx_ring->buffer_info[i].skb = skb;
		tx_ring->buffer_info[i].length = skb->len;
		tx_ring->buffer_info[i].dma =
			dma_map_single(&pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev,
				      tx_ring->buffer_info[i].dma)) {
			ret_val = 4;
			goto err_nomem;
		}
		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rx_ring->count)
		rx_ring->count = E1000_DEFAULT_RXD;

	rx_ring->buffer_info = kcalloc(rx_ring->count,
				       sizeof(struct e1000_buffer),
				       GFP_KERNEL);
	if (!(rx_ring->buffer_info)) {
		ret_val = 5;
		goto err_nomem;
	}

	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		ret_val = 6;
		goto err_nomem;
	}
	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF));
	ew32(RDBAH, ((u64) rx_ring->dma >> 32));
	ew32(RDLEN, rx_ring->size);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
	       E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
	       E1000_RCTL_SBP | E1000_RCTL_SECRC |
	       E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
	       (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	ew32(RCTL, rctl);

	for (i = 0; i < rx_ring->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct sk_buff *skb;

		skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL);
		if (!skb) {
			ret_val = 7;
			goto err_nomem;
		}
		skb_reserve(skb, NET_IP_ALIGN);
		rx_ring->buffer_info[i].skb = skb;
		rx_ring->buffer_info[i].dma =
			dma_map_single(&pdev->dev, skb->data, 2048,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev,
				      rx_ring->buffer_info[i].dma)) {
			ret_val = 8;
			goto err_nomem;
		}
		rx_desc->buffer_addr =
			cpu_to_le64(rx_ring->buffer_info[i].dma);
		memset(skb->data, 0x00, skb->len);
	}

	return 0;

err_nomem:
	e1000_free_desc_rings(adapter);
	return ret_val;
}

static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
{
	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1e_wphy(&adapter->hw, 29, 0x001F);
	e1e_wphy(&adapter->hw, 30, 0x8FFC);
	e1e_wphy(&adapter->hw, 29, 0x001A);
	e1e_wphy(&adapter->hw, 30, 0x8FF0);
}

static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;
	u32 stat_reg = 0;
	u16 phy_reg = 0;
	s32 ret_val = 0;

	hw->mac.autoneg = 0;

	if (hw->phy.type == e1000_phy_ife) {
		/* force 100, set loopback */
		e1e_wphy(hw, PHY_CONTROL, 0x6100);

		/* Now set up the MAC to the same speed/duplex as the PHY. */
		ctrl_reg = er32(CTRL);
		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
			     E1000_CTRL_FD);     /* Force Duplex to FULL */

		ew32(CTRL, ctrl_reg);

		return 0;
	}

	/* Specific PHY configuration for loopback */
	switch (hw->phy.type) {
	case e1000_phy_m88:
		/* Auto-MDI/MDIX Off */
		e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* reset to update Auto-MDI/MDIX */
		e1e_wphy(hw, PHY_CONTROL, 0x9140);
		/* autoneg off */
		e1e_wphy(hw, PHY_CONTROL, 0x8140);
		break;
	case e1000_phy_gg82563:
		e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
		break;
	case e1000_phy_bm:
		/* Set Default MAC Interface speed to 1GB */
		e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
		phy_reg &= ~0x0007;
		phy_reg |= 0x006;
		e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
		/* Assert SW reset for above settings to take effect */
		e1000e_commit_phy(hw);
		/* Force Full Duplex */
		e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
		e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
		/* Set Link Up (in force link) */
		e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
		e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
		/* Force Link */
		e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
		e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
		/* Set Early Link Enable */
		e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
		e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
		break;
	case e1000_phy_82577:
	case e1000_phy_82578:
		/* Workaround: K1 must be disabled for stable 1Gbps operation */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			e_err("Cannot setup 1Gbps loopback.\n");
			return ret_val;
		}
		e1000_configure_k1_ich8lan(hw, false);
		hw->phy.ops.release(hw);
		break;
	case e1000_phy_82579:
		/* Disable PHY energy detect power down */
		e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
		e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
		/* Disable full chip energy detect */
		e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
		e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
		/* Enable loopback on the PHY */
#define I82577_PHY_LBK_CTRL 19
		e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
		break;
	default:
		break;
	}

	/* force 1000, set loopback */
	e1e_wphy(hw, PHY_CONTROL, 0x4140);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = er32(CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
		     E1000_CTRL_FD);     /* Force Duplex to FULL */

	if (adapter->flags & FLAG_IS_ICH)
		ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */

	if (hw->phy.media_type == e1000_media_type_copper &&
	    hw->phy.type == e1000_phy_m88) {
		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
	} else {
		/*
		 * Set the ILOS bit on the fiber Nic if half duplex link is
		 * detected.
		 */
		stat_reg = er32(STATUS);
		if ((stat_reg & E1000_STATUS_FD) == 0)
			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
	}

	ew32(CTRL, ctrl_reg);

	/*
	 * Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (hw->phy.type == e1000_phy_m88)
		e1000_phy_disable_receiver(adapter);

	udelay(500);

	return 0;
}

static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl = er32(CTRL);
	int link = 0;

	/* special requirements for 82571/82572 fiber adapters */

	/*
	 * jump through hoops to make sure link is up because serdes
	 * link is hardwired up
	 */
	ctrl |= E1000_CTRL_SLU;
	ew32(CTRL, ctrl);

	/* disable autoneg */
	ctrl = er32(TXCW);
	ctrl &= ~(1 << 31);
	ew32(TXCW, ctrl);

	link = (er32(STATUS) & E1000_STATUS_LU);

	if (!link) {
		/* set invert loss of signal */
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_ILOS;
		ew32(CTRL, ctrl);
	}

	/*
	 * special write to serdes control register to enable SerDes analog
	 * loopback
	 */
#define E1000_SERDES_LB_ON 0x410
	ew32(SCTL, E1000_SERDES_LB_ON);

	return 0;
}

/* only call this for fiber/serdes connections to es2lan */
static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrlext = er32(CTRL_EXT);
	u32 ctrl = er32(CTRL);

	/*
	 * save CTRL_EXT to restore later, reuse an empty variable (unused
	 * on mac_type 80003es2lan)
	 */
	adapter->tx_fifo_head = ctrlext;

	/* clear the serdes mode bits, putting the device into mac loopback */
	ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
	ew32(CTRL_EXT, ctrlext);

	/* force speed to 1000/FD, link up */
	ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
		 E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
	ew32(CTRL, ctrl);

	/* set mac loopback */
	ctrl = er32(RCTL);
	ctrl |= E1000_RCTL_LBM_MAC;
	ew32(RCTL, ctrl);

	/* set testing mode parameters (no need to reset later) */
#define KMRNCTRLSTA_OPMODE (0x1F << 16)
#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
	ew32(KMRNCTRLSTA,
	     (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));

	return 0;
}

static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (hw->phy.media_type == e1000_media_type_fiber ||
	    hw->phy.media_type == e1000_media_type_internal_serdes) {
		switch (hw->mac.type) {
		case e1000_80003es2lan:
			return e1000_set_es2lan_mac_loopback(adapter);
		case e1000_82571:
		case e1000_82572:
			return e1000_set_82571_fiber_loopback(adapter);
		default:
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_LBM_TCVR;
			ew32(RCTL, rctl);
			return 0;
		}
	} else if (hw->phy.media_type == e1000_media_type_copper) {
		return e1000_integrated_phy_loopback(adapter);
	}

	return 7;
}

static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	rctl = er32(RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	ew32(RCTL, rctl);

	switch (hw->mac.type) {
	case e1000_80003es2lan:
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes) {
			/* restore CTRL_EXT, stealing space from tx_fifo_head */
			ew32(CTRL_EXT, adapter->tx_fifo_head);
			adapter->tx_fifo_head = 0;
		}
		/* fall through */
	case e1000_82571:
	case e1000_82572:
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
			ew32(SCTL, E1000_SERDES_LB_OFF);
			msleep(10);
			break;
		}
		/* Fall Through */
	default:
		hw->mac.autoneg = 1;
		if (hw->phy.type == e1000_phy_gg82563)
			e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
		e1e_rphy(hw, PHY_CONTROL, &phy_reg);
		if (phy_reg & MII_CR_LOOPBACK) {
			phy_reg &= ~MII_CR_LOOPBACK;
			e1e_wphy(hw, PHY_CONTROL, phy_reg);
			e1000e_commit_phy(hw);
		}
		break;
	}
}

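/*
 * Loopback test frames: the payload is filled with 0xFF, the second half is
 * overwritten with 0xAA plus 0xBE/0xAF marker bytes, so that
 * e1000_check_lbtest_frame() can verify a received frame is an uncorrupted
 * copy of what was transmitted.
 */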
static void e1000_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size &= ~1;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}

static int e1000_check_lbtest_frame(struct sk_buff *skb,
				    unsigned int frame_size)
{
	frame_size &= ~1;
	if (*(skb->data + 3) == 0xFF)
		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
			return 0;
	return 13;
}

static int e1000_run_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = &adapter->test_tx_ring;
	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, j, k, l;
	int lc;
	int good_cnt;
	int ret_val = 0;
	unsigned long time;

	ew32(RDT, rx_ring->count - 1);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	k = 0;
	l = 0;
	for (j = 0; j <= lc; j++) { /* loop count loop */
		for (i = 0; i < 64; i++) { /* send the packets */
			e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
						  1024);
			dma_sync_single_for_device(&pdev->dev,
					tx_ring->buffer_info[k].dma,
					tx_ring->buffer_info[k].length,
					DMA_TO_DEVICE);
			k++;
			if (k == tx_ring->count)
				k = 0;
		}
		ew32(TDT, k);
		msleep(200);
		time = jiffies; /* set the start time for the receive */
		good_cnt = 0;
		do { /* receive the sent packets */
			dma_sync_single_for_cpu(&pdev->dev,
					rx_ring->buffer_info[l].dma, 2048,
					DMA_FROM_DEVICE);

			ret_val = e1000_check_lbtest_frame(
					rx_ring->buffer_info[l].skb, 1024);
			if (!ret_val)
				good_cnt++;
			l++;
			if (l == rx_ring->count)
				l = 0;
			/*
			 * time + 20 msecs (200 msecs on 2.4) is more than
			 * enough time to complete the receives, if it's
			 * exceeded, break and error off
			 */
		} while ((good_cnt < 64) && !time_after(jiffies, time + 20));
		if (good_cnt != 64) {
			ret_val = 13; /* ret_val is the same as mis-compare */
			break;
		}
		if (jiffies >= (time + 20)) {
			ret_val = 14; /* error code for time out error */
			break;
		}
	} /* end loop count loop */
	return ret_val;
}

static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
	/*
	 * PHY loopback cannot be performed if SoL/IDER
	 * sessions are active
	 */
	if (e1000_check_reset_block(&adapter->hw)) {
		e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
		*data = 0;
		goto out;
	}

	*data = e1000_setup_desc_rings(adapter);
	if (*data)
		goto out;

	*data = e1000_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;

	*data = e1000_run_loopback_test(adapter);
	e1000_loopback_cleanup(adapter);

err_loopback:
	e1000_free_desc_rings(adapter);
out:
	return *data;
}

static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	*data = 0;

	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
		int i = 0;
		hw->mac.serdes_has_link = false;

		/*
		 * On some blade server designs, link establishment
		 * could take as long as 2-3 minutes
		 */
		do {
			hw->mac.ops.check_for_link(hw);
			if (hw->mac.serdes_has_link)
				break;
			msleep(20);
		} while (i++ < 3750);

		*data = !hw->mac.serdes_has_link;
	} else {
		hw->mac.ops.check_for_link(hw);
		if (hw->mac.autoneg)
			msleep(4000);

		if (!(er32(STATUS) &
		      E1000_STATUS_LU))
			*data = 1;
	}
	return *data;
}

static int e1000e_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E1000_TEST_LEN;
	case ETH_SS_STATS:
		return E1000_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e1000_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	u16 autoneg_advertised;
	u8 forced_speed_duplex;
	u8 autoneg;
	bool if_running = netif_running(netdev);

	set_bit(__E1000_TESTING, &adapter->state);

	if (!if_running) {
		/* Get control of and reset hardware */
		if (adapter->flags & FLAG_HAS_AMT)
			e1000e_get_hw_control(adapter);

		e1000e_power_up_phy(adapter);

		adapter->hw.phy.autoneg_wait_to_complete = 1;
		e1000e_reset(adapter);
		adapter->hw.phy.autoneg_wait_to_complete = 0;
	}

	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* save speed, duplex, autoneg settings */
		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
		autoneg = adapter->hw.mac.autoneg;

		e_info("offline testing starting\n");

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);

		if (e1000_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000e_reset(adapter);
		if (e1000_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000e_reset(adapter);
		if (e1000_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000e_reset(adapter);
		if (e1000_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* force this routine to wait until autoneg complete/timeout */
		adapter->hw.phy.autoneg_wait_to_complete = 1;
		e1000e_reset(adapter);
		adapter->hw.phy.autoneg_wait_to_complete = 0;

		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
		adapter->hw.mac.autoneg = autoneg;
		e1000e_reset(adapter);

		clear_bit(__E1000_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		e_info("online testing starting\n");

		/* register, eeprom, intr and loopback tests not run online */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		clear_bit(__E1000_TESTING, &adapter->state);
	}

	if (!if_running) {
		e1000e_reset(adapter);

		if (adapter->flags & FLAG_HAS_AMT)
			e1000e_release_hw_control(adapter);
	}

	msleep_interruptible(4 * 1000);
}

static void e1000_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (!(adapter->flags & FLAG_HAS_WOL) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	wol->supported = WAKE_UCAST | WAKE_MCAST |
	    WAKE_BCAST | WAKE_MAGIC |
	    WAKE_PHY | WAKE_ARP;

	/* apply any specific unsupported masks here */
	if (adapter->flags & FLAG_NO_WAKE_UCAST) {
		wol->supported &= ~WAKE_UCAST;

		if (adapter->wol & E1000_WUFC_EX)
			e_err("Interface does not support directed (unicast) "
			      "frame wake-up packets\n");
	}

	if (adapter->wol & E1000_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & E1000_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & E1000_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & E1000_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
	if (adapter->wol & E1000_WUFC_LNKC)
		wol->wolopts |= WAKE_PHY;
	if (adapter->wol & E1000_WUFC_ARP)
		wol->wolopts |= WAKE_ARP;
}

static int e1000_set_wol(struct net_device *netdev,
			 struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!(adapter->flags & FLAG_HAS_WOL) ||
	    !device_can_wakeup(&adapter->pdev->dev) ||
	    (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
			      WAKE_MAGIC | WAKE_PHY | WAKE_ARP)))
		return -EOPNOTSUPP;

	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= E1000_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= E1000_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= E1000_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= E1000_WUFC_MAG;
	if (wol->wolopts & WAKE_PHY)
		adapter->wol |= E1000_WUFC_LNKC;
	if (wol->wolopts & WAKE_ARP)
		adapter->wol |= E1000_WUFC_ARP;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

/* toggle LED 4 times per second = 2 "blinks" per second */
#define E1000_ID_INTERVAL	(HZ/4)

/* bit defines for adapter->led_status */
#define E1000_LED_ON		0

void e1000e_led_blink_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, led_blink_task);

	if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
		adapter->hw.mac.ops.led_off(&adapter->hw);
	else
		adapter->hw.mac.ops.led_on(&adapter->hw);
}

static void e1000_led_blink_callback(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;

	schedule_work(&adapter->led_blink_task);
	mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
}

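/*
 * Identify the adapter by blinking its LED.  For the MAC/PHY types checked
 * below, the LED is toggled from software via blink_timer/led_blink_task;
 * everything else can use e1000e_blink_led() directly.
 */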
static int e1000_phys_id(struct net_device *netdev, u32 data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);

	if ((hw->phy.type == e1000_phy_ife) ||
	    (hw->mac.type == e1000_pchlan) ||
	    (hw->mac.type == e1000_pch2lan) ||
	    (hw->mac.type == e1000_82583) ||
	    (hw->mac.type == e1000_82574)) {
		if (!adapter->blink_timer.function) {
			init_timer(&adapter->blink_timer);
			adapter->blink_timer.function =
				e1000_led_blink_callback;
			adapter->blink_timer.data = (unsigned long) adapter;
		}
		mod_timer(&adapter->blink_timer, jiffies);
		msleep_interruptible(data * 1000);
		del_timer_sync(&adapter->blink_timer);
		if (hw->phy.type == e1000_phy_ife)
			e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
	} else {
		e1000e_blink_led(hw);
		msleep_interruptible(data * 1000);
	}

	hw->mac.ops.led_off(hw);
	clear_bit(E1000_LED_ON, &adapter->led_status);
	hw->mac.ops.cleanup_led(hw);

	return 0;
}

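/*
 * itr_setting values of 4 or less are special interrupt-moderation modes
 * and are passed through as-is; larger values are in interrupts/sec, so
 * they are converted to/from rx_coalesce_usecs with 1000000/itr_setting.
 */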
static int e1000_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (adapter->itr_setting <= 4)
		ec->rx_coalesce_usecs = adapter->itr_setting;
	else
		ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;

	return 0;
}

static int e1000_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 4) &&
	     (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2))
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 4) {
		adapter->itr = adapter->itr_setting = 4;
	} else if (ec->rx_coalesce_usecs <= 3) {
		adapter->itr = 20000;
		adapter->itr_setting = ec->rx_coalesce_usecs;
	} else {
		adapter->itr = (1000000 / ec->rx_coalesce_usecs);
		adapter->itr_setting = adapter->itr & ~3;
	}

	if (adapter->itr_setting != 0)
		ew32(ITR, 1000000000 / (adapter->itr * 256));
	else
		ew32(ITR, 0);

	return 0;
}

static int e1000_nway_reset(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	if (netif_running(netdev))
		e1000e_reinit_locked(adapter);
	return 0;
}

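/*
 * Gather the 'ethtool -S' statistics: refresh the hardware counters, then
 * copy each value out of either struct net_device or struct e1000_adapter
 * according to the type/size/offset recorded in e1000_gstrings_stats[].
 */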
static void e1000_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats,
				    u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int i;
	char *p = NULL;

	e1000e_update_stats(adapter);
	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
		switch (e1000_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) netdev +
					e1000_gstrings_stats[i].stat_offset;
			break;
		case E1000_STATS:
			p = (char *) adapter +
					e1000_gstrings_stats[i].stat_offset;
			break;
		default:
			continue;
		}

		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}

static void e1000_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
		break;
	case ETH_SS_STATS:
		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
			memcpy(p, e1000_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static const struct ethtool_ops e1000_ethtool_ops = {
	.get_settings		= e1000_get_settings,
	.set_settings		= e1000_set_settings,
	.get_drvinfo		= e1000_get_drvinfo,
	.get_regs_len		= e1000_get_regs_len,
	.get_regs		= e1000_get_regs,
	.get_wol		= e1000_get_wol,
	.set_wol		= e1000_set_wol,
	.get_msglevel		= e1000_get_msglevel,
	.set_msglevel		= e1000_set_msglevel,
	.nway_reset		= e1000_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= e1000_get_eeprom_len,
	.get_eeprom		= e1000_get_eeprom,
	.set_eeprom		= e1000_set_eeprom,
	.get_ringparam		= e1000_get_ringparam,
	.set_ringparam		= e1000_set_ringparam,
	.get_pauseparam		= e1000_get_pauseparam,
	.set_pauseparam		= e1000_set_pauseparam,
	.get_rx_csum		= e1000_get_rx_csum,
	.set_rx_csum		= e1000_set_rx_csum,
	.get_tx_csum		= e1000_get_tx_csum,
	.set_tx_csum		= e1000_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= e1000_set_tso,
	.self_test		= e1000_diag_test,
	.get_strings		= e1000_get_strings,
	.phys_id		= e1000_phys_id,
	.get_ethtool_stats	= e1000_get_ethtool_stats,
	.get_sset_count		= e1000e_get_sset_count,
	.get_coalesce		= e1000_get_coalesce,
	.set_coalesce		= e1000_set_coalesce,
	.get_flags		= ethtool_op_get_flags,
};

void e1000e_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
}