1 /*******************************************************************************
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
28 /* ethtool support for ixgbe */
30 #include <linux/types.h>
31 #include <linux/module.h>
32 #include <linux/slab.h>
33 #include <linux/pci.h>
34 #include <linux/netdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/vmalloc.h>
37 #include <linux/uaccess.h>
/* Number of Receive Address Register entries exposed by this driver. */
42 #define IXGBE_ALL_RAR_ENTRIES 16
/* Tags telling the stats code where a counter lives: the generic netdev
 * rtnl_link_stats64 block, or the driver's own ixgbe_adapter stats. */
44 enum {NETDEV_STATS, IXGBE_STATS};
/* Display name reported to ethtool for this statistic
 * (member of the stats descriptor struct whose opening lines precede this). */
47 char stat_string[ETH_GSTRING_LEN];
/* Expand to the {type, size, offset} triple for an ixgbe_adapter member,
 * so the stats dump can fetch it generically via offsetof. */
53 #define IXGBE_STAT(m) IXGBE_STATS, \
54 sizeof(((struct ixgbe_adapter *)0)->m), \
55 offsetof(struct ixgbe_adapter, m)
/* Same triple, but for a member of struct rtnl_link_stats64. */
56 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
57 sizeof(((struct rtnl_link_stats64 *)0)->m), \
58 offsetof(struct rtnl_link_stats64, m)
/* Table of all named statistics exported through ethtool -S.
 * Each entry maps a display string to either a netdev counter
 * (IXGBE_NETDEV_STAT) or a driver/hardware counter (IXGBE_STAT). */
60 static struct ixgbe_stats ixgbe_gstrings_stats[] = {
61 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
62 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
63 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
64 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
65 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
66 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
67 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
68 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
69 {"lsc_int", IXGBE_STAT(lsc_int)},
70 {"tx_busy", IXGBE_STAT(tx_busy)},
71 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
72 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
73 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
74 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
75 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
76 {"multicast", IXGBE_NETDEV_STAT(multicast)},
77 {"broadcast", IXGBE_STAT(stats.bprc)},
/* rnbc[0] only: first packet-buffer's "receive no buffer" count. */
78 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
79 {"collisions", IXGBE_NETDEV_STAT(collisions)},
80 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
81 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
82 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
/* hw_rsc_*: hardware Receive Side Coalescing totals kept by the driver. */
83 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
84 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
/* fdir_*: Flow Director filter match/miss/overflow counters. */
85 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
86 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
87 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
88 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
89 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
90 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
91 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
92 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
93 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
94 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
95 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
96 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
97 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
/* Link-level (non-priority) flow control XON/XOFF frame counts. */
98 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
99 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
100 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
101 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
102 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
103 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
104 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
105 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
/* os2bmc_*: OS-to-BMC (manageability) pass-through traffic counters. */
106 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
107 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
108 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
109 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
/* FCoE counters; compiled in only under the IXGBE_FCOE config (see #endif). */
111 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
112 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
113 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
114 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
115 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
116 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
117 #endif /* IXGBE_FCOE */
/* Per-queue stat count: (tx queues + rx queues) * u64 words per queue.
 * Note: these macros expect a local `netdev` in the expansion context. */
120 #define IXGBE_QUEUE_STATS_LEN \
121 ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
122 ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
123 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
/* Number of entries in the fixed gstrings table above. */
124 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
/* Per-priority packet-buffer stats, counted only when DCB is enabled. */
125 #define IXGBE_PB_STATS_LEN ( \
126 (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
127 IXGBE_FLAG_DCB_ENABLED) ? \
128 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
129 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
130 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
131 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
/* Grand total reported to ethtool for ETH_SS_STATS. */
133 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
134 IXGBE_PB_STATS_LEN + \
135 IXGBE_QUEUE_STATS_LEN)
/* Names of the ethtool self-tests, in the order they are executed. */
137 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
138 "Register test (offline)", "Eeprom test (offline)",
139 "Interrupt test (offline)", "Loopback test (offline)",
140 "Link test (on/offline)"
142 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
/* ethtool get_settings: report supported/advertised link modes, the port
 * type, and the current speed/duplex, based on media type, MAC type, and
 * (for SFP+) the detected module type. */
144 static int ixgbe_get_settings(struct net_device *netdev,
145 struct ethtool_cmd *ecmd)
147 struct ixgbe_adapter *adapter = netdev_priv(netdev);
148 struct ixgbe_hw *hw = &adapter->hw;
/* Baseline: every ixgbe device supports 10G full duplex with autoneg. */
152 ecmd->supported = SUPPORTED_10000baseT_Full;
153 ecmd->autoneg = AUTONEG_ENABLE;
154 ecmd->transceiver = XCVR_EXTERNAL;
/* Copper or multispeed fiber: additional lower speeds may be available. */
155 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
156 (hw->phy.multispeed_fiber)) {
157 ecmd->supported |= (SUPPORTED_1000baseT_Full |
/* 100M full is MAC-dependent (added for the matching mac.type case). */
160 switch (hw->mac.type) {
162 ecmd->supported |= SUPPORTED_100baseT_Full;
/* Advertise exactly what the PHY was configured to advertise, if set. */
168 ecmd->advertising = ADVERTISED_Autoneg;
169 if (hw->phy.autoneg_advertised) {
170 if (hw->phy.autoneg_advertised &
171 IXGBE_LINK_SPEED_100_FULL)
172 ecmd->advertising |= ADVERTISED_100baseT_Full;
173 if (hw->phy.autoneg_advertised &
174 IXGBE_LINK_SPEED_10GB_FULL)
175 ecmd->advertising |= ADVERTISED_10000baseT_Full;
176 if (hw->phy.autoneg_advertised &
177 IXGBE_LINK_SPEED_1GB_FULL)
178 ecmd->advertising |= ADVERTISED_1000baseT_Full;
181 * Default advertised modes in case
182 * phy.autoneg_advertised isn't set.
184 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
185 ADVERTISED_1000baseT_Full);
186 if (hw->mac.type == ixgbe_mac_X540)
187 ecmd->advertising |= ADVERTISED_100baseT_Full;
/* Report TP for copper media, FIBRE otherwise. */
190 if (hw->phy.media_type == ixgbe_media_type_copper) {
191 ecmd->supported |= SUPPORTED_TP;
192 ecmd->advertising |= ADVERTISED_TP;
193 ecmd->port = PORT_TP;
195 ecmd->supported |= SUPPORTED_FIBRE;
196 ecmd->advertising |= ADVERTISED_FIBRE;
197 ecmd->port = PORT_FIBRE;
199 } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
200 /* Set as FIBRE until SERDES defined in kernel */
201 if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
202 ecmd->supported = (SUPPORTED_1000baseT_Full |
204 ecmd->advertising = (ADVERTISED_1000baseT_Full |
206 ecmd->port = PORT_FIBRE;
207 ecmd->autoneg = AUTONEG_DISABLE;
208 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
209 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
210 ecmd->supported |= (SUPPORTED_1000baseT_Full |
213 ecmd->advertising = (ADVERTISED_10000baseT_Full |
214 ADVERTISED_1000baseT_Full |
217 ecmd->port = PORT_FIBRE;
219 ecmd->supported |= (SUPPORTED_1000baseT_Full |
221 ecmd->advertising = (ADVERTISED_10000baseT_Full |
222 ADVERTISED_1000baseT_Full |
224 ecmd->port = PORT_FIBRE;
/* Remaining media types: 10G fiber only, no autoneg. */
227 ecmd->supported |= SUPPORTED_FIBRE;
228 ecmd->advertising = (ADVERTISED_10000baseT_Full |
230 ecmd->port = PORT_FIBRE;
231 ecmd->autoneg = AUTONEG_DISABLE;
/* Refine ecmd->port from the specific PHY / SFP module type. */
235 switch (adapter->hw.phy.type) {
238 case ixgbe_phy_cu_unknown:
239 /* Copper 10G-BASET */
240 ecmd->port = PORT_TP;
243 ecmd->port = PORT_FIBRE;
246 case ixgbe_phy_sfp_passive_tyco:
247 case ixgbe_phy_sfp_passive_unknown:
248 case ixgbe_phy_sfp_ftl:
249 case ixgbe_phy_sfp_avago:
250 case ixgbe_phy_sfp_intel:
251 case ixgbe_phy_sfp_unknown:
252 switch (adapter->hw.phy.sfp_type) {
253 /* SFP+ devices, further checking needed */
254 case ixgbe_sfp_type_da_cu:
255 case ixgbe_sfp_type_da_cu_core0:
256 case ixgbe_sfp_type_da_cu_core1:
257 ecmd->port = PORT_DA;
259 case ixgbe_sfp_type_sr:
260 case ixgbe_sfp_type_lr:
261 case ixgbe_sfp_type_srlr_core0:
262 case ixgbe_sfp_type_srlr_core1:
263 ecmd->port = PORT_FIBRE;
265 case ixgbe_sfp_type_not_present:
266 ecmd->port = PORT_NONE;
268 case ixgbe_sfp_type_1g_cu_core0:
269 case ixgbe_sfp_type_1g_cu_core1:
270 ecmd->port = PORT_TP;
271 ecmd->supported = SUPPORTED_TP;
272 ecmd->advertising = (ADVERTISED_1000baseT_Full |
275 case ixgbe_sfp_type_unknown:
277 ecmd->port = PORT_OTHER;
282 ecmd->port = PORT_NONE;
284 case ixgbe_phy_unknown:
285 case ixgbe_phy_generic:
286 case ixgbe_phy_sfp_unsupported:
288 ecmd->port = PORT_OTHER;
/* Query current link state from the MAC and report speed/duplex. */
292 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
294 switch (link_speed) {
295 case IXGBE_LINK_SPEED_10GB_FULL:
296 ethtool_cmd_speed_set(ecmd, SPEED_10000);
298 case IXGBE_LINK_SPEED_1GB_FULL:
299 ethtool_cmd_speed_set(ecmd, SPEED_1000);
301 case IXGBE_LINK_SPEED_100_FULL:
302 ethtool_cmd_speed_set(ecmd, SPEED_100);
/* All supported speeds are full duplex; -1 means speed unknown/no link. */
307 ecmd->duplex = DUPLEX_FULL;
309 ethtool_cmd_speed_set(ecmd, -1);
/* ethtool set_settings: restrict the advertised autoneg speeds (copper /
 * multispeed fiber), or validate a forced 10G/full request for other media.
 * Does not support forcing duplex. */
316 static int ixgbe_set_settings(struct net_device *netdev,
317 struct ethtool_cmd *ecmd)
319 struct ixgbe_adapter *adapter = netdev_priv(netdev);
320 struct ixgbe_hw *hw = &adapter->hw;
324 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
325 (hw->phy.multispeed_fiber)) {
326 /* 10000/copper and 1000/copper must autoneg
327 * this function does not support any duplex forcing, but can
328 * limit the advertising of the adapter to only 10000 or 1000 */
329 if (ecmd->autoneg == AUTONEG_DISABLE)
/* Remember the old advertisement so we can roll back on failure. */
332 old = hw->phy.autoneg_advertised;
/* Translate ethtool ADVERTISED_* bits to IXGBE_LINK_SPEED_* bits. */
334 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
335 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
337 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
338 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
340 if (ecmd->advertising & ADVERTISED_100baseT_Full)
341 advertised |= IXGBE_LINK_SPEED_100_FULL;
/* Nothing changed: skip the link reconfiguration entirely. */
343 if (old == advertised)
345 /* this sets the link speed and restarts auto-neg */
346 hw->mac.autotry_restart = true;
347 err = hw->mac.ops.setup_link(hw, advertised, true, true);
/* On failure, log and restore the previous advertisement. */
349 e_info(probe, "setup link failed with code %d\n", err);
350 hw->mac.ops.setup_link(hw, old, true, true);
353 /* in this case we currently only support 10Gb/FULL */
354 u32 speed = ethtool_cmd_speed(ecmd);
355 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
356 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
357 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
/* ethtool get_pauseparam: report flow-control autoneg state and which
 * directions (rx/tx pause) are currently active. */
364 static void ixgbe_get_pauseparam(struct net_device *netdev,
365 struct ethtool_pauseparam *pause)
367 struct ixgbe_adapter *adapter = netdev_priv(netdev);
368 struct ixgbe_hw *hw = &adapter->hw;
371 * Flow Control Autoneg isn't on if
372 * - we didn't ask for it OR
373 * - it failed, we know this by tx & rx being off
375 if (hw->fc.disable_fc_autoneg ||
376 (hw->fc.current_mode == ixgbe_fc_none))
/* Map the negotiated fc mode onto the rx/tx pause flags. */
381 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
383 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
385 } else if (hw->fc.current_mode == ixgbe_fc_full) {
/* Priority flow control (PFC) is handled as its own mode. */
389 } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
/* ethtool set_pauseparam: translate requested rx/tx pause + autoneg into an
 * ixgbe_fc mode and reinitialize the device if the config changed.
 * Rejected while PFC is enabled (82598 with DCB uses PFC instead). */
396 static int ixgbe_set_pauseparam(struct net_device *netdev,
397 struct ethtool_pauseparam *pause)
399 struct ixgbe_adapter *adapter = netdev_priv(netdev);
400 struct ixgbe_hw *hw = &adapter->hw;
/* Work on a scratch copy so we can compare against hw->fc afterwards. */
401 struct ixgbe_fc_info fc;
404 if (adapter->dcb_cfg.pfc_mode_enable ||
405 ((hw->mac.type == ixgbe_mac_82598EB) &&
406 (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
412 if (pause->autoneg != AUTONEG_ENABLE)
413 fc.disable_fc_autoneg = true;
415 fc.disable_fc_autoneg = false;
/* With autoneg on, request full fc and let negotiation sort it out. */
417 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
418 fc.requested_mode = ixgbe_fc_full;
419 else if (pause->rx_pause && !pause->tx_pause)
420 fc.requested_mode = ixgbe_fc_rx_pause;
421 else if (!pause->rx_pause && pause->tx_pause)
422 fc.requested_mode = ixgbe_fc_tx_pause;
423 else if (!pause->rx_pause && !pause->tx_pause)
424 fc.requested_mode = ixgbe_fc_none;
429 adapter->last_lfc_mode = fc.requested_mode;
432 /* if the thing changed then we'll update and use new autoneg */
433 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
/* Full reinit if the interface is up, plain reset otherwise. */
435 if (netif_running(netdev))
436 ixgbe_reinit_locked(adapter);
438 ixgbe_reset(adapter);
/* ethtool get_rx_csum: non-zero iff RX checksum offload is enabled. */
444 static u32 ixgbe_get_rx_csum(struct net_device *netdev)
446 struct ixgbe_adapter *adapter = netdev_priv(netdev);
447 return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
/* ethtool set_rx_csum: set or clear the RX checksum offload flag. */
450 static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
452 struct ixgbe_adapter *adapter = netdev_priv(netdev);
454 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
456 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
/* ethtool get_tx_csum: non-zero iff TX IPv4 checksum offload is on. */
461 static u32 ixgbe_get_tx_csum(struct net_device *netdev)
463 return (netdev->features & NETIF_F_IP_CSUM) != 0;
/* ethtool set_tx_csum: toggle TX checksum offload features. IPv4/IPv6
 * checksums always; SCTP CRC offload only on MACs that support it. */
466 static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
468 struct ixgbe_adapter *adapter = netdev_priv(netdev);
471 feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
472 switch (adapter->hw.mac.type) {
473 case ixgbe_mac_82599EB:
475 feature_list |= NETIF_F_SCTP_CSUM;
481 netdev->features |= feature_list;
483 netdev->features &= ~feature_list;
/* ethtool set_tso: enable/disable TCP segmentation offload (v4 and v6). */
488 static int ixgbe_set_tso(struct net_device *netdev, u32 data)
491 netdev->features |= NETIF_F_TSO;
492 netdev->features |= NETIF_F_TSO6;
494 netdev->features &= ~NETIF_F_TSO;
495 netdev->features &= ~NETIF_F_TSO6;
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
500 static u32 ixgbe_get_msglevel(struct net_device *netdev)
502 struct ixgbe_adapter *adapter = netdev_priv(netdev);
503 return adapter->msg_enable;
/* ethtool set_msglevel: store the requested message-enable bitmask. */
506 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
508 struct ixgbe_adapter *adapter = netdev_priv(netdev);
509 adapter->msg_enable = data;
/* ethtool get_regs_len: size in bytes of the register dump buffer.
 * IXGBE_REGS_LEN must match the highest index written in ixgbe_get_regs. */
512 static int ixgbe_get_regs_len(struct net_device *netdev)
514 #define IXGBE_REGS_LEN 1128
515 return IXGBE_REGS_LEN * sizeof(u32);
/* Shorthand to read a software-kept hardware statistic from the adapter. */
518 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
/* ethtool get_regs: dump device registers (and some cached statistics)
 * into a fixed-layout u32 buffer of IXGBE_REGS_LEN entries. The buffer
 * offsets are part of the ethtool dump format and must not change. */
520 static void ixgbe_get_regs(struct net_device *netdev,
521 struct ethtool_regs *regs, void *p)
523 struct ixgbe_adapter *adapter = netdev_priv(netdev);
524 struct ixgbe_hw *hw = &adapter->hw;
528 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
/* Version encodes dump format (1) plus chip revision and device id. */
530 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
532 /* General Registers */
533 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
534 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
535 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
536 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
537 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
538 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
539 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
540 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
/* NVM / EEPROM and flash control registers (indices 8-17). */
543 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
544 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
545 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
546 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
547 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
548 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
549 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
550 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
551 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
552 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
/* Interrupt registers (indices 18-29). */
555 /* don't read EICR because it can clear interrupt causes, instead
556 * read EICS which is a shadow but doesn't clear EICR */
557 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
558 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
559 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
560 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
561 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
562 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
563 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
564 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
565 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
566 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
567 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
568 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
/* Flow control registers; FCRTL/FCRTH addresses differ per MAC type. */
571 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
572 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
573 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
574 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
575 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
576 for (i = 0; i < 8; i++) {
577 switch (hw->mac.type) {
578 case ixgbe_mac_82598EB:
579 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
580 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
582 case ixgbe_mac_82599EB:
583 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
584 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
590 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
591 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
/* Receive DMA ring registers for all 64 queues (indices 53-468). */
594 for (i = 0; i < 64; i++)
595 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
596 for (i = 0; i < 64; i++)
597 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
598 for (i = 0; i < 64; i++)
599 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
600 for (i = 0; i < 64; i++)
601 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
602 for (i = 0; i < 64; i++)
603 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
604 for (i = 0; i < 64; i++)
605 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
606 for (i = 0; i < 16; i++)
607 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
608 for (i = 0; i < 16; i++)
609 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
610 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
611 for (i = 0; i < 8; i++)
612 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
613 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
614 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
/* Receive filtering / address registers (indices 480-536). */
617 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
618 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
619 for (i = 0; i < 16; i++)
620 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
621 for (i = 0; i < 16; i++)
622 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
623 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
624 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
625 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
626 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
627 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
628 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
629 for (i = 0; i < 8; i++)
630 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
631 for (i = 0; i < 8; i++)
632 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
633 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
/* Transmit DMA ring registers for 32 queues (indices 537-819). */
636 for (i = 0; i < 32; i++)
637 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
638 for (i = 0; i < 32; i++)
639 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
640 for (i = 0; i < 32; i++)
641 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
642 for (i = 0; i < 32; i++)
643 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
644 for (i = 0; i < 32; i++)
645 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
646 for (i = 0; i < 32; i++)
647 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
648 for (i = 0; i < 32; i++)
649 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
650 for (i = 0; i < 32; i++)
651 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
652 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
653 for (i = 0; i < 16; i++)
654 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
655 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
656 for (i = 0; i < 8; i++)
657 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
658 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
/* Wake-up / wake-on-LAN registers (indices 820-828). */
661 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
662 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
663 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
664 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
665 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
666 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
667 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
668 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
669 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
/* DCB arbitration registers (indices 829-880). */
672 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
673 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
674 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
675 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
676 for (i = 0; i < 8; i++)
677 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
678 for (i = 0; i < 8; i++)
679 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
680 for (i = 0; i < 8; i++)
681 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
682 for (i = 0; i < 8; i++)
683 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
684 for (i = 0; i < 8; i++)
685 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
686 for (i = 0; i < 8; i++)
687 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
/* Statistics: these come from the driver's cached counters, not MMIO. */
690 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
691 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
692 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
693 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
694 for (i = 0; i < 8; i++)
695 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
696 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
697 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
698 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
699 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
700 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
701 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
702 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
703 for (i = 0; i < 8; i++)
704 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
705 for (i = 0; i < 8; i++)
706 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
707 for (i = 0; i < 8; i++)
708 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
709 for (i = 0; i < 8; i++)
710 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
711 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
712 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
713 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
714 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
715 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
716 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
717 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
718 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
719 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
720 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
/* Note: 64-bit octet counters occupy two slots (942/943, 944/945). */
721 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
722 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
723 for (i = 0; i < 8; i++)
724 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
725 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
726 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
727 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
728 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
729 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
730 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
731 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
732 regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
733 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
734 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
735 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
736 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
737 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
738 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
739 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
740 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
741 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
742 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
743 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
744 for (i = 0; i < 16; i++)
745 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
746 for (i = 0; i < 16; i++)
747 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
748 for (i = 0; i < 16; i++)
749 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
750 for (i = 0; i < 16; i++)
751 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
/* MAC / PCS / autoneg registers (indices 1038-1070). */
754 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
755 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
756 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
757 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
758 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
759 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
760 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
761 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
762 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
763 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
764 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
765 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
766 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
767 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
768 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
769 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
770 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
771 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
772 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
773 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
774 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
775 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
776 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
777 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
778 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
779 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
780 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
781 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
782 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
783 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
784 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
785 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
786 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
/* Diagnostic registers (indices 1071-1127; last index == REGS_LEN - 1). */
789 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
790 for (i = 0; i < 8; i++)
791 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
792 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
793 for (i = 0; i < 4; i++)
794 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
795 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
796 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
797 for (i = 0; i < 8; i++)
798 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
799 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
800 for (i = 0; i < 4; i++)
801 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
802 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
803 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
804 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
805 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
806 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
807 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
808 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
809 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
810 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
811 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
812 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
813 for (i = 0; i < 8; i++)
814 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
815 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
816 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
817 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
818 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
819 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
820 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
821 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
822 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
823 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
/* ethtool get_eeprom_len: EEPROM size in bytes (word_size is in 16-bit
 * words, hence the * 2). */
826 static int ixgbe_get_eeprom_len(struct net_device *netdev)
828 struct ixgbe_adapter *adapter = netdev_priv(netdev);
829 return adapter->hw.eeprom.word_size * 2;
/* ethtool get_eeprom: read a byte range from the device EEPROM.
 * Reads whole 16-bit words covering [offset, offset+len), byte-swaps to
 * host order, then copies out the requested (possibly odd-aligned) bytes. */
832 static int ixgbe_get_eeprom(struct net_device *netdev,
833 struct ethtool_eeprom *eeprom, u8 *bytes)
835 struct ixgbe_adapter *adapter = netdev_priv(netdev);
836 struct ixgbe_hw *hw = &adapter->hw;
838 int first_word, last_word, eeprom_len;
842 if (eeprom->len == 0)
/* Magic lets userspace sanity-check which device this dump came from. */
845 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
/* Convert the byte range to an inclusive word range. */
847 first_word = eeprom->offset >> 1;
848 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
849 eeprom_len = last_word - first_word + 1;
851 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
855 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
858 /* Device's eeprom is always little-endian, word addressable */
859 for (i = 0; i < eeprom_len; i++)
860 le16_to_cpus(&eeprom_buff[i]);
/* (offset & 1) skips the leading byte when the range starts mid-word. */
862 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
/* ethtool get_drvinfo: report driver name/version, firmware (EEPROM)
 * version, PCI bus info, and the sizes of the stats/test/regs dumps. */
868 static void ixgbe_get_drvinfo(struct net_device *netdev,
869 struct ethtool_drvinfo *drvinfo)
871 struct ixgbe_adapter *adapter = netdev_priv(netdev);
872 char firmware_version[32];
874 strncpy(drvinfo->driver, ixgbe_driver_name,
875 sizeof(drvinfo->driver) - 1);
876 strncpy(drvinfo->version, ixgbe_driver_version,
877 sizeof(drvinfo->version) - 1);
/* EEPROM version is packed as major.minor-build in 4/8/4 bit fields. */
879 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
880 (adapter->eeprom_version & 0xF000) >> 12,
881 (adapter->eeprom_version & 0x0FF0) >> 4,
882 adapter->eeprom_version & 0x000F);
884 strncpy(drvinfo->fw_version, firmware_version,
885 sizeof(drvinfo->fw_version));
886 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
887 sizeof(drvinfo->bus_info))
888 drvinfo->n_stats = IXGBE_STATS_LEN;
889 drvinfo->testinfo_len = IXGBE_TEST_LEN;
890 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
/* ethtool get_ringparam: report descriptor ring limits and current sizes.
 * All rings share one size, so ring 0 is representative. Mini/jumbo rings
 * do not exist on this hardware and are reported as 0. */
893 static void ixgbe_get_ringparam(struct net_device *netdev,
894 struct ethtool_ringparam *ring)
896 struct ixgbe_adapter *adapter = netdev_priv(netdev);
897 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
898 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
900 ring->rx_max_pending = IXGBE_MAX_RXD;
901 ring->tx_max_pending = IXGBE_MAX_TXD;
902 ring->rx_mini_max_pending = 0;
903 ring->rx_jumbo_max_pending = 0;
904 ring->rx_pending = rx_ring->count;
905 ring->tx_pending = tx_ring->count;
906 ring->rx_mini_pending = 0;
907 ring->rx_jumbo_pending = 0;
/* ethtool set_ringparam: resize the TX/RX descriptor rings.
 * Allocates new ring resources into temporary arrays first, then swaps
 * them in, so a failed allocation leaves the running rings untouched. */
910 static int ixgbe_set_ringparam(struct net_device *netdev,
911 struct ethtool_ringparam *ring)
913 struct ixgbe_adapter *adapter = netdev_priv(netdev);
914 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
916 u32 new_rx_count, new_tx_count;
917 bool need_update = false;
/* Mini and jumbo rings are unsupported. */
919 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
/* Clamp requests to hardware limits and round to the required multiple. */
922 new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
923 new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
924 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
926 new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
927 new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
928 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
/* No change after clamping: nothing to do. */
930 if ((new_tx_count == adapter->tx_ring[0]->count) &&
931 (new_rx_count == adapter->rx_ring[0]->count)) {
/* Serialize against other reset paths. */
936 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
937 usleep_range(1000, 2000);
/* Interface down: just record the new counts, no resources to rebuild. */
939 if (!netif_running(adapter->netdev)) {
940 for (i = 0; i < adapter->num_tx_queues; i++)
941 adapter->tx_ring[i]->count = new_tx_count;
942 for (i = 0; i < adapter->num_rx_queues; i++)
943 adapter->rx_ring[i]->count = new_rx_count;
944 adapter->tx_ring_count = new_tx_count;
945 adapter->rx_ring_count = new_rx_count;
/* Stage new TX rings in a temporary array; unwind on failure. */
949 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
955 if (new_tx_count != adapter->tx_ring_count) {
956 for (i = 0; i < adapter->num_tx_queues; i++) {
957 memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
958 sizeof(struct ixgbe_ring));
959 temp_tx_ring[i].count = new_tx_count;
960 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
964 ixgbe_free_tx_resources(&temp_tx_ring[i]);
/* Stage new RX rings the same way. */
972 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
978 if (new_rx_count != adapter->rx_ring_count) {
979 for (i = 0; i < adapter->num_rx_queues; i++) {
980 memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
981 sizeof(struct ixgbe_ring));
982 temp_rx_ring[i].count = new_rx_count;
983 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
987 ixgbe_free_rx_resources(&temp_rx_ring[i]);
995 /* if rings need to be updated, here's the place to do it in one shot */
/* Swap in the staged TX rings, freeing the old resources. */
1000 if (new_tx_count != adapter->tx_ring_count) {
1001 for (i = 0; i < adapter->num_tx_queues; i++) {
1002 ixgbe_free_tx_resources(adapter->tx_ring[i]);
1003 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
1004 sizeof(struct ixgbe_ring));
1006 adapter->tx_ring_count = new_tx_count;
/* Swap in the staged RX rings. */
1010 if (new_rx_count != adapter->rx_ring_count) {
1011 for (i = 0; i < adapter->num_rx_queues; i++) {
1012 ixgbe_free_rx_resources(adapter->rx_ring[i]);
1013 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
1014 sizeof(struct ixgbe_ring));
1016 adapter->rx_ring_count = new_rx_count;
/* Cleanup: the temporary arrays are always released. */
1021 vfree(temp_rx_ring);
1023 vfree(temp_tx_ring);
1025 clear_bit(__IXGBE_RESETTING, &adapter->state);
/* ethtool .get_sset_count: number of strings in the requested string
 * set — self-test names (ETH_SS_TEST) or statistics names
 * (ETH_SS_STATS).  Switch/case labels are elided in this listing.
 */
1029 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1033 return IXGBE_TEST_LEN;
1035 return IXGBE_STATS_LEN;
/* ethtool .get_stats: fill data[] in the same order that
 * ixgbe_get_strings() emits names — global stats first, then per-Tx-
 * queue packet/byte pairs, then per-Rx-queue pairs, then (if DCB is
 * enabled) per-packet-buffer PFC counters.
 */
1041 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1042 struct ethtool_stats *stats, u64 *data)
1044 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1045 struct rtnl_link_stats64 temp;
1046 const struct rtnl_link_stats64 *net_stats;
1048 struct ixgbe_ring *ring;
/* refresh hardware counters before snapshotting */
1052 ixgbe_update_stats(adapter);
1053 net_stats = dev_get_stats(netdev, &temp);
/* global stats: each entry records whether its offset is relative to
 * the netdev stats block or to the adapter struct */
1054 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1055 switch (ixgbe_gstrings_stats[i].type) {
1057 p = (char *) net_stats +
1058 ixgbe_gstrings_stats[i].stat_offset;
1061 p = (char *) adapter +
1062 ixgbe_gstrings_stats[i].stat_offset;
/* counters are stored as either u64 or u32; pick by recorded size */
1066 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1067 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
/* per-queue counters: read packets/bytes under the u64_stats seqlock
 * so 32-bit readers see a consistent pair */
1069 for (j = 0; j < adapter->num_tx_queues; j++) {
1070 ring = adapter->tx_ring[j];
1072 start = u64_stats_fetch_begin_bh(&ring->syncp);
1073 data[i] = ring->stats.packets;
1074 data[i+1] = ring->stats.bytes;
1075 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1078 for (j = 0; j < adapter->num_rx_queues; j++) {
1079 ring = adapter->rx_ring[j];
1081 start = u64_stats_fetch_begin_bh(&ring->syncp);
1082 data[i] = ring->stats.packets;
1083 data[i+1] = ring->stats.bytes;
1084 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
/* DCB: priority flow control XON/XOFF counts per packet buffer */
1087 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1088 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
1089 data[i++] = adapter->stats.pxontxc[j];
1090 data[i++] = adapter->stats.pxofftxc[j];
1092 for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
1093 data[i++] = adapter->stats.pxonrxc[j];
1094 data[i++] = adapter->stats.pxoffrxc[j];
/* ethtool .get_strings: emit the name for every entry that
 * ixgbe_get_ethtool_stats()/ixgbe_diag_test() will report, in the
 * identical order.  Each name occupies ETH_GSTRING_LEN bytes.
 */
1099 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1102 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1103 char *p = (char *)data;
1106 switch (stringset) {
/* self-test names come straight from the static test-string table */
1108 memcpy(data, *ixgbe_gstrings_test,
1109 IXGBE_TEST_LEN * ETH_GSTRING_LEN);
/* stats names: globals, then per-queue, then optional DCB entries */
1112 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1113 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1115 p += ETH_GSTRING_LEN;
1117 for (i = 0; i < adapter->num_tx_queues; i++) {
1118 sprintf(p, "tx_queue_%u_packets", i);
1119 p += ETH_GSTRING_LEN;
1120 sprintf(p, "tx_queue_%u_bytes", i);
1121 p += ETH_GSTRING_LEN;
1123 for (i = 0; i < adapter->num_rx_queues; i++) {
1124 sprintf(p, "rx_queue_%u_packets", i);
1125 p += ETH_GSTRING_LEN;
1126 sprintf(p, "rx_queue_%u_bytes", i);
1127 p += ETH_GSTRING_LEN;
1129 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1130 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
1131 sprintf(p, "tx_pb_%u_pxon", i);
1132 p += ETH_GSTRING_LEN;
1133 sprintf(p, "tx_pb_%u_pxoff", i);
1134 p += ETH_GSTRING_LEN;
1136 for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
1137 sprintf(p, "rx_pb_%u_pxon", i);
1138 p += ETH_GSTRING_LEN;
1139 sprintf(p, "rx_pb_%u_pxoff", i);
1140 p += ETH_GSTRING_LEN;
1143 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
/* Diagnostic: query current link state through the MAC ops and record
 * pass/fail in *data (result handling is in elided lines).
 */
1148 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1150 struct ixgbe_hw *hw = &adapter->hw;
1155 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1163 /* ethtool register test data */
/* Descriptor for one register-test entry; field declarations are in
 * elided lines of this listing (register offset, array length, test
 * type, mask, write value).
 */
1164 struct ixgbe_reg_test {
1172 /* In the hardware, registers are laid out either singly, in arrays
1173 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1174 * most tests take place on arrays or single registers (handled
1175 * as a single-element array) and special-case the tables.
1176 * Table tests are always pattern tests.
1178 * We also make provision for some required setup steps by specifying
1179 * registers to be written without any read-back testing.
/* test_type values used by ixgbe_reg_test() below */
1182 #define PATTERN_TEST 1
1183 #define SET_READ_TEST 2
1184 #define WRITE_NO_TEST 3
1185 #define TABLE32_TEST 4
1186 #define TABLE64_TEST_LO 5
1187 #define TABLE64_TEST_HI 6
1189 /* default 82599 register test */
/* Entries: { reg, array_len, test_type, mask, write }.  Also used for
 * X540 (see ixgbe_reg_test).  The terminating null entry falls in an
 * elided line after the MTA table test.
 */
1190 static const struct ixgbe_reg_test reg_test_82599[] = {
1191 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1192 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1193 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1194 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1195 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1196 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1197 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1198 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1199 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1200 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1201 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1202 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1203 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1204 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1205 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1206 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1207 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1208 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1209 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1213 /* default 82598 register test */
/* Same entry layout as reg_test_82599; terminating null entry elided. */
1214 static const struct ixgbe_reg_test reg_test_82598[] = {
1215 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1216 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1217 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1218 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1219 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1220 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1221 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1222 /* Enable all four RX queues before testing. */
1223 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1224 /* RDH is read-only for 82598, only test RDT. */
1225 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1226 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1227 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1228 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1229 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1230 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1231 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1232 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1233 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1234 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1235 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1236 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1237 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
/* Write four fixed bit patterns (masked by 'write') to a register and
 * verify each reads back as pattern & write & mask.  The original
 * register value is saved first and restored on both the failure path
 * and after a successful run.  Presumably returns true on failure so
 * the REG_PATTERN_TEST macro can bail out — the return statements fall
 * in elided lines; confirm against the full source.
 */
1241 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1242 u32 mask, u32 write)
1244 u32 pat, val, before;
1245 static const u32 test_pattern[] = {
1246 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1248 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
/* save current contents so the register can be restored */
1249 before = readl(adapter->hw.hw_addr + reg);
1250 writel((test_pattern[pat] & write),
1251 (adapter->hw.hw_addr + reg));
1252 val = readl(adapter->hw.hw_addr + reg);
1253 if (val != (test_pattern[pat] & write & mask)) {
1254 e_err(drv, "pattern test reg %04X failed: got "
1255 "0x%08X expected 0x%08X\n",
1256 reg, val, (test_pattern[pat] & write & mask));
/* restore before reporting the failure */
1258 writel(before, adapter->hw.hw_addr + reg);
1261 writel(before, adapter->hw.hw_addr + reg);
/* Write (write & mask) to a register once and verify the masked
 * read-back matches.  Original contents are saved and restored on both
 * paths.  Companion to reg_pattern_test() for SET_READ_TEST entries.
 */
1266 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1267 u32 mask, u32 write)
1270 before = readl(adapter->hw.hw_addr + reg);
1271 writel((write & mask), (adapter->hw.hw_addr + reg));
1272 val = readl(adapter->hw.hw_addr + reg);
1273 if ((write & mask) != (val & mask)) {
1274 e_err(drv, "set/check reg %04X test failed: got 0x%08X "
1275 "expected 0x%08X\n", reg, (val & mask), (write & mask));
1277 writel(before, (adapter->hw.hw_addr + reg));
1280 writel(before, (adapter->hw.hw_addr + reg));
/* Wrappers used by ixgbe_reg_test(): run one register check and, on
 * failure, exit the test early (the early-exit statement is in elided
 * lines of each macro body).
 */
1284 #define REG_PATTERN_TEST(reg, mask, write) \
1286 if (reg_pattern_test(adapter, data, reg, mask, write)) \
1291 #define REG_SET_AND_CHECK(reg, mask, write) \
1293 if (reg_set_and_check(adapter, data, reg, mask, write)) \
/* Diagnostic register test: pick the per-MAC test table and toggle
 * mask, special-case the STATUS register (mixed read-only/toggle
 * bits), then walk the table applying each entry's test type at the
 * appropriate register stride (0x40 for ringed registers, 4 for 32-bit
 * tables, 8 for 64-bit tables).
 */
1297 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1299 const struct ixgbe_reg_test *test;
1300 u32 value, before, after;
1303 switch (adapter->hw.mac.type) {
1304 case ixgbe_mac_82598EB:
1305 toggle = 0x7FFFF3FF;
1306 test = reg_test_82598;
/* 82599 and X540 share the same register test table */
1308 case ixgbe_mac_82599EB:
1309 case ixgbe_mac_X540:
1310 toggle = 0x7FFFF30F;
1311 test = reg_test_82599;
1320 * Because the status register is such a special case,
1321 * we handle it separately from the rest of the register
1322 * tests. Some bits are read-only, some toggle, and some
1323 * are writeable on newer MACs.
1325 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1326 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1327 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1328 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1329 if (value != after) {
1330 e_err(drv, "failed STATUS register test got: 0x%08X "
1331 "expected: 0x%08X\n", after, value);
1335 /* restore previous status */
1336 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
1339 * Perform the remainder of the register test, looping through
1340 * the test table until we either fail or reach the null entry.
/* inner loop: each entry covers array_len registers of one test type */
1343 for (i = 0; i < test->array_len; i++) {
1344 switch (test->test_type) {
1346 REG_PATTERN_TEST(test->reg + (i * 0x40),
1351 REG_SET_AND_CHECK(test->reg + (i * 0x40),
/* WRITE_NO_TEST: raw setup write, no read-back verification */
1357 (adapter->hw.hw_addr + test->reg)
1361 REG_PATTERN_TEST(test->reg + (i * 4),
1365 case TABLE64_TEST_LO:
1366 REG_PATTERN_TEST(test->reg + (i * 8),
1370 case TABLE64_TEST_HI:
/* high dword of a 64-bit table entry lives at reg + 4 */
1371 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
/* Diagnostic: validate the EEPROM checksum via the EEPROM ops; the
 * pass/fail recording into *data falls in elided lines.
 */
1384 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1386 struct ixgbe_hw *hw = &adapter->hw;
1387 if (hw->eeprom.ops.validate_checksum(hw, NULL))
/* Interrupt handler used only by ixgbe_intr_test(): accumulate the
 * extended interrupt cause register so the test can see which causes
 * actually fired.
 */
1394 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1396 struct net_device *netdev = (struct net_device *) data;
1397 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1399 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
/* Diagnostic interrupt test: temporarily install ixgbe_test_intr on
 * the device IRQ, then for each of the low 10 interrupt causes verify
 * that (a) a masked cause does not post, (b) an unmasked forced cause
 * does post, and (c) forcing all other causes with this one masked
 * posts nothing unexpected.  MSI-X mode is not exercised.  Mask/force
 * is done through EIMC/EIMS (mask clear/set) and EICS (cause set).
 */
1404 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1406 struct net_device *netdev = adapter->netdev;
1407 u32 mask, i = 0, shared_int = true;
1408 u32 irq = adapter->pdev->irq;
1412 /* Hook up test interrupt handler just for this test */
1413 if (adapter->msix_entries) {
1414 /* NOTE: we don't test MSI-X interrupts here, yet */
1416 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
/* MSI: exclusive (non-shared) handler */
1418 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
/* legacy: probe whether the line is shared; IRQF_PROBE_SHARED
 * succeeding means nothing else claimed it exclusively */
1423 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1424 netdev->name, netdev)) {
1426 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1427 netdev->name, netdev)) {
1431 e_info(hw, "testing %s interrupt\n", shared_int ?
1432 "shared" : "unshared");
1434 /* Disable all the interrupts */
1435 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1436 usleep_range(10000, 20000);
1438 /* Test each interrupt */
1439 for (; i < 10; i++) {
1440 /* Interrupt to test */
1445 * Disable the interrupts to be reported in
1446 * the cause register and then force the same
1447 * interrupt and see if one gets posted. If
1448 * an interrupt was posted to the bus, the
1451 adapter->test_icr = 0;
1452 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1453 ~mask & 0x00007FFF);
1454 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1455 ~mask & 0x00007FFF);
1456 usleep_range(10000, 20000);
/* masked cause fired anyway -> fail */
1458 if (adapter->test_icr & mask) {
1465 * Enable the interrupt to be reported in the cause
1466 * register and then force the same interrupt and see
1467 * if one gets posted. If an interrupt was not posted
1468 * to the bus, the test failed.
1470 adapter->test_icr = 0;
1471 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1472 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1473 usleep_range(10000, 20000);
/* unmasked forced cause did not fire -> fail */
1475 if (!(adapter->test_icr &mask)) {
1482 * Disable the other interrupts to be reported in
1483 * the cause register and then force the other
1484 * interrupts and see if any get posted. If
1485 * an interrupt was posted to the bus, the
1488 adapter->test_icr = 0;
1489 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1490 ~mask & 0x00007FFF);
1491 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1492 ~mask & 0x00007FFF);
1493 usleep_range(10000, 20000);
1495 if (adapter->test_icr) {
1502 /* Disable all the interrupts */
1503 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1504 usleep_range(10000, 20000);
1506 /* Unhook test interrupt handler */
1507 free_irq(irq, netdev);
/* Tear down the dedicated loopback-test rings: stop the Rx unit and
 * the test Rx queue, disable the test Tx queue (plus the global DMA Tx
 * enable on 82599/X540), reset the hardware, then free the ring
 * resources.
 */
1512 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1514 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1515 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1516 struct ixgbe_hw *hw = &adapter->hw;
1519 /* shut down the DMA engines now so they can be reinitialized later */
1522 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1523 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1524 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1525 ixgbe_disable_rx_queue(adapter, rx_ring);
1528 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1529 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1530 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
/* newer MACs also gate Tx DMA globally via DMATXCTL */
1532 switch (hw->mac.type) {
1533 case ixgbe_mac_82599EB:
1534 case ixgbe_mac_X540:
1535 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1536 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1537 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1543 ixgbe_reset(adapter);
1545 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1546 ixgbe_free_rx_resources(&adapter->test_rx_ring);
/* Allocate and configure the dedicated Tx/Rx rings used by the
 * loopback test, mirroring queue 0's register indices.  On any setup
 * failure the error path (elided) frees everything via
 * ixgbe_free_desc_rings().
 */
1549 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1551 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1552 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1557 /* Setup Tx descriptor ring and Tx buffers */
1558 tx_ring->count = IXGBE_DEFAULT_TXD;
1559 tx_ring->queue_index = 0;
1560 tx_ring->dev = &adapter->pdev->dev;
1561 tx_ring->netdev = adapter->netdev;
1562 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1563 tx_ring->numa_node = adapter->node;
1565 err = ixgbe_setup_tx_resources(tx_ring);
/* 82599/X540 need the global Tx DMA enable before the queue works */
1569 switch (adapter->hw.mac.type) {
1570 case ixgbe_mac_82599EB:
1571 case ixgbe_mac_X540:
1572 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1573 reg_data |= IXGBE_DMATXCTL_TE;
1574 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1580 ixgbe_configure_tx_ring(adapter, tx_ring);
1582 /* Setup Rx Descriptor ring and Rx buffers */
1583 rx_ring->count = IXGBE_DEFAULT_RXD;
1584 rx_ring->queue_index = 0;
1585 rx_ring->dev = &adapter->pdev->dev;
1586 rx_ring->netdev = adapter->netdev;
1587 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1588 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
1589 rx_ring->numa_node = adapter->node;
1591 err = ixgbe_setup_rx_resources(rx_ring);
/* Rx unit must be disabled while the ring is (re)configured */
1597 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1600 ixgbe_configure_rx_ring(adapter, rx_ring);
/* re-enable Rx; DMBYPS bypasses the descriptor monitor for the test */
1602 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1603 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1608 ixgbe_free_desc_rings(adapter);
/* Configure MAC-level loopback for the diagnostic: force link up (X540
 * via MACC.FLU), set HLREG0.LPBK, accept all frames (BAM/SBP/MPE), and
 * force 10G link with no autonegotiation via AUTOC.  On 82598, also
 * power down the Atlas PHY Tx lanes so nothing leaves the wire; they
 * are re-enabled by the next reset.
 */
1612 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1614 struct ixgbe_hw *hw = &adapter->hw;
1617 /* X540 needs to set the MACC.FLU bit to force link up */
1618 if (adapter->hw.mac.type == ixgbe_mac_X540) {
1619 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC);
1620 reg_data |= IXGBE_MACC_FLU;
1621 IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data);
1624 /* right now we only support MAC loopback in the driver */
1625 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1626 /* Setup MAC loopback */
1627 reg_data |= IXGBE_HLREG0_LPBK;
1628 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
/* promiscuous-style filtering so the looped frames are accepted */
1630 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1631 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1632 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
1634 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
1635 reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1636 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1637 IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
1638 IXGBE_WRITE_FLUSH(&adapter->hw);
1639 usleep_range(10000, 20000);
1641 /* Disable Atlas Tx lanes; re-enabled in reset path */
1642 if (hw->mac.type == ixgbe_mac_82598EB) {
1645 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1646 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1647 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1649 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1650 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1651 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1653 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1654 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1655 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1657 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1658 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1659 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
/* Undo MAC loopback: clear HLREG0.LPBK set by
 * ixgbe_setup_loopback_test().  Remaining test state is restored by
 * the subsequent adapter reset.
 */
1665 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1669 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1670 reg_data &= ~IXGBE_HLREG0_LPBK;
1671 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
/* Fill a test skb with a recognizable pattern: 0xFF everywhere, 0xAA
 * over the second half, plus 0xBE/0xAF marker bytes that
 * ixgbe_check_lbtest_frame() verifies after the loopback round trip.
 */
1674 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1675 unsigned int frame_size)
1677 memset(skb->data, 0xFF, frame_size);
1679 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1680 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1681 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
/* Verify a received frame carries the pattern written by
 * ixgbe_create_lbtest_frame(): 0xFF at offset 3 and the 0xBE/0xAF
 * markers in the second half.  Success/failure return values fall in
 * elided lines.
 */
1684 static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1685 unsigned int frame_size)
1688 if (*(skb->data + 3) == 0xFF) {
1689 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1690 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
/* Reap completed loopback frames: walk Rx descriptors with the DD bit
 * set, unmap and verify each Rx buffer, release the paired Tx buffer,
 * and advance both next-to-clean indices with wraparound.  Returns the
 * number of frames that passed verification (count increment is in an
 * elided line) and refills the Rx ring before storing the new indices.
 */
1697 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1698 struct ixgbe_ring *tx_ring,
1701 union ixgbe_adv_rx_desc *rx_desc;
1702 struct ixgbe_rx_buffer *rx_buffer_info;
1703 struct ixgbe_tx_buffer *tx_buffer_info;
1704 const int bufsz = rx_ring->rx_buf_len;
1706 u16 rx_ntc, tx_ntc, count = 0;
1708 /* initialize next to clean and descriptor values */
1709 rx_ntc = rx_ring->next_to_clean;
1710 tx_ntc = tx_ring->next_to_clean;
1711 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
1712 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
/* DD (descriptor done) set means hardware finished writing this one */
1714 while (staterr & IXGBE_RXD_STAT_DD) {
1715 /* check Rx buffer */
1716 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1718 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
1719 dma_unmap_single(rx_ring->dev,
1720 rx_buffer_info->dma,
1723 rx_buffer_info->dma = 0;
1725 /* verify contents of skb */
1726 if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
1729 /* unmap buffer on Tx side */
1730 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1731 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1733 /* increment Rx/Tx next to clean counters */
1735 if (rx_ntc == rx_ring->count)
1738 if (tx_ntc == tx_ring->count)
1741 /* fetch next descriptor */
1742 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
1743 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1746 /* re-map buffers to ring, store next to clean values */
1747 ixgbe_alloc_rx_buffers(rx_ring, count);
1748 rx_ring->next_to_clean = rx_ntc;
1749 tx_ring->next_to_clean = tx_ntc;
/* Drive the MAC loopback test: build one 1024-byte pattern skb, then
 * repeatedly transmit it in bursts of 64 on the test Tx ring and
 * verify all 64 return intact on the test Rx ring.  The loop count is
 * sized to wrap the larger of the two rings at least twice.  Returns
 * nonzero ret_val on a send or receive shortfall (assignments elided).
 */
1754 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1756 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1757 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1758 int i, j, lc, good_cnt, ret_val = 0;
1759 unsigned int size = 1024;
1760 netdev_tx_t tx_ret_val;
1761 struct sk_buff *skb;
1763 /* allocate test skb */
1764 skb = alloc_skb(size, GFP_KERNEL);
1768 /* place data into test skb */
1769 ixgbe_create_lbtest_frame(skb, size);
1773 * Calculate the loop count based on the largest descriptor ring
1774 * The idea is to wrap the largest ring a number of times using 64
1775 * send/receive pairs during each loop
1778 if (rx_ring->count <= tx_ring->count)
1779 lc = ((tx_ring->count / 64) * 2) + 1;
1781 lc = ((rx_ring->count / 64) * 2) + 1;
1783 for (j = 0; j <= lc; j++) {
1784 /* reset count of good packets */
1787 /* place 64 packets on the transmit queue*/
1788 for (i = 0; i < 64; i++) {
1790 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1793 if (tx_ret_val == NETDEV_TX_OK)
1797 if (good_cnt != 64) {
1802 /* allow 200 milliseconds for packets to go from Tx to Rx */
1805 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1806 if (good_cnt != 64) {
1812 /* free the original skb */
/* Diagnostic loopback test entry point: set up the test descriptor
 * rings, enable MAC loopback, run the frame test, then clean up the
 * loopback config and free the rings.  *data carries the failure code
 * of the first stage that failed; early-exit gotos are in elided
 * lines.
 */
1818 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1820 *data = ixgbe_setup_desc_rings(adapter);
1823 *data = ixgbe_setup_loopback_test(adapter);
1826 *data = ixgbe_run_loopback_test(adapter);
1827 ixgbe_loopback_cleanup(adapter);
1830 ixgbe_free_desc_rings(adapter);
/* ethtool .self_test: run the diagnostic suite.
 *
 * Offline mode runs link, register, EEPROM, interrupt, and loopback
 * tests (data[4], data[0..3] respectively), with a hardware reset
 * between stages; it refuses to go offline while any SR-IOV VF is
 * active, and skips the loopback test entirely in SR-IOV/VMDq mode.
 * Online mode runs only the link test and zeroes the rest.  The
 * __IXGBE_TESTING state bit guards the whole sequence; the interface
 * is closed/reopened around offline testing in elided lines.
 */
1835 static void ixgbe_diag_test(struct net_device *netdev,
1836 struct ethtool_test *eth_test, u64 *data)
1838 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1839 bool if_running = netif_running(netdev);
1841 set_bit(__IXGBE_TESTING, &adapter->state);
1842 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1845 e_info(hw, "offline testing starting\n");
1847 /* Link test performed before hardware reset so autoneg doesn't
1848 * interfere with test result */
1849 if (ixgbe_link_test(adapter, &data[4]))
1850 eth_test->flags |= ETH_TEST_FL_FAILED;
/* offline testing resets the device, which would break active VFs */
1852 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1854 for (i = 0; i < adapter->num_vfs; i++) {
1855 if (adapter->vfinfo[i].clear_to_send) {
1856 netdev_warn(netdev, "%s",
1857 "offline diagnostic is not "
1858 "supported when VFs are "
1864 eth_test->flags |= ETH_TEST_FL_FAILED;
1865 clear_bit(__IXGBE_TESTING,
1873 /* indicate we're in test mode */
1876 ixgbe_reset(adapter);
1878 e_info(hw, "register testing starting\n");
1879 if (ixgbe_reg_test(adapter, &data[0]))
1880 eth_test->flags |= ETH_TEST_FL_FAILED;
/* reset between stages so each test starts from known hw state */
1882 ixgbe_reset(adapter);
1883 e_info(hw, "eeprom testing starting\n");
1884 if (ixgbe_eeprom_test(adapter, &data[1]))
1885 eth_test->flags |= ETH_TEST_FL_FAILED;
1887 ixgbe_reset(adapter);
1888 e_info(hw, "interrupt testing starting\n");
1889 if (ixgbe_intr_test(adapter, &data[2]))
1890 eth_test->flags |= ETH_TEST_FL_FAILED;
1892 /* If SRIOV or VMDq is enabled then skip MAC
1893 * loopback diagnostic. */
1894 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1895 IXGBE_FLAG_VMDQ_ENABLED)) {
1896 e_info(hw, "Skip MAC loopback diagnostic in VT "
1902 ixgbe_reset(adapter);
1903 e_info(hw, "loopback testing starting\n");
1904 if (ixgbe_loopback_test(adapter, &data[3]))
1905 eth_test->flags |= ETH_TEST_FL_FAILED;
1908 ixgbe_reset(adapter);
1910 clear_bit(__IXGBE_TESTING, &adapter->state);
/* online path: link test only */
1914 e_info(hw, "online testing starting\n");
1916 if (ixgbe_link_test(adapter, &data[4]))
1917 eth_test->flags |= ETH_TEST_FL_FAILED;
1919 /* Online tests aren't run; pass by default */
1925 clear_bit(__IXGBE_TESTING, &adapter->state);
1928 msleep_interruptible(4 * 1000);
/* Decide whether this device supports Wake-on-LAN.  Only a few
 * device/subdevice ID combinations do; for everything else the caller
 * treats WoL as unsupported (return values are in elided lines).
 */
1931 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1932 struct ethtool_wolinfo *wol)
1934 struct ixgbe_hw *hw = &adapter->hw;
1937 /* WOL not supported except for the following */
1938 switch(hw->device_id) {
1939 case IXGBE_DEV_ID_82599_SFP:
1940 /* Only this subdevice supports WOL */
1941 if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
1947 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1948 /* All except this subdevice support WOL */
1949 if (hw->subsystem_device_id ==
1950 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
1956 case IXGBE_DEV_ID_82599_KX4:
/* ethtool .get_wol: advertise unicast/multicast/broadcast/magic-packet
 * wake support, then translate the adapter's stored WUFC filter bits
 * into wolopts.  Devices excluded by ixgbe_wol_exclusion() or without
 * platform wakeup capability report nothing (early exit elided).
 */
1966 static void ixgbe_get_wol(struct net_device *netdev,
1967 struct ethtool_wolinfo *wol)
1969 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1971 wol->supported = WAKE_UCAST | WAKE_MCAST |
1972 WAKE_BCAST | WAKE_MAGIC;
1975 if (ixgbe_wol_exclusion(adapter, wol) ||
1976 !device_can_wakeup(&adapter->pdev->dev))
1979 if (adapter->wol & IXGBE_WUFC_EX)
1980 wol->wolopts |= WAKE_UCAST;
1981 if (adapter->wol & IXGBE_WUFC_MC)
1982 wol->wolopts |= WAKE_MCAST;
1983 if (adapter->wol & IXGBE_WUFC_BC)
1984 wol->wolopts |= WAKE_BCAST;
1985 if (adapter->wol & IXGBE_WUFC_MAG)
1986 wol->wolopts |= WAKE_MAGIC;
/* ethtool .set_wol: reject wake modes the hardware cannot do
 * (PHY/ARP/secure-magic), reject excluded devices, then map wolopts to
 * WUFC filter bits and tell the PM core whether wakeup is enabled.
 */
1989 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1991 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1993 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1996 if (ixgbe_wol_exclusion(adapter, wol))
1997 return wol->wolopts ? -EOPNOTSUPP : 0;
2001 if (wol->wolopts & WAKE_UCAST)
2002 adapter->wol |= IXGBE_WUFC_EX;
2003 if (wol->wolopts & WAKE_MCAST)
2004 adapter->wol |= IXGBE_WUFC_MC;
2005 if (wol->wolopts & WAKE_BCAST)
2006 adapter->wol |= IXGBE_WUFC_BC;
2007 if (wol->wolopts & WAKE_MAGIC)
2008 adapter->wol |= IXGBE_WUFC_MAG;
/* nonzero adapter->wol means wakeup should be armed at suspend */
2010 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* ethtool .nway_reset: restart the link by doing a full reinit of a
 * running interface (a no-op when the interface is down).
 */
2015 static int ixgbe_nway_reset(struct net_device *netdev)
2017 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2019 if (netif_running(netdev))
2020 ixgbe_reinit_locked(adapter);
/* ethtool .set_phys_id: blink the identify LED.  ACTIVE saves the
 * current LEDCTL register, ON/OFF drive the LED through the MAC ops,
 * and INACTIVE restores the saved register value.
 */
2025 static int ixgbe_set_phys_id(struct net_device *netdev,
2026 enum ethtool_phys_id_state state)
2028 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2029 struct ixgbe_hw *hw = &adapter->hw;
2032 case ETHTOOL_ID_ACTIVE:
2033 adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2037 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
2040 case ETHTOOL_ID_OFF:
2041 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
2044 case ETHTOOL_ID_INACTIVE:
2045 /* Restore LED settings */
2046 IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
/* ethtool .get_coalesce: report interrupt moderation.  The itr_setting
 * encoding is: 0 = throttling disabled, 1 = dynamic ITR, anything else
 * = fixed rate; fixed rates are converted from interrupts/second
 * (eitr_param) to usecs.  When q_vector 0 handles both Tx and Rx, Tx
 * settings are not reported separately.
 */
2053 static int ixgbe_get_coalesce(struct net_device *netdev,
2054 struct ethtool_coalesce *ec)
2056 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2058 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
2060 /* only valid if in constant ITR mode */
2061 switch (adapter->rx_itr_setting) {
2063 /* throttling disabled */
2064 ec->rx_coalesce_usecs = 0;
2067 /* dynamic ITR mode */
2068 ec->rx_coalesce_usecs = 1;
2071 /* fixed interrupt rate mode */
2072 ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
2076 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2077 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
2080 /* only valid if in constant ITR mode */
2081 switch (adapter->tx_itr_setting) {
2083 /* throttling disabled */
2084 ec->tx_coalesce_usecs = 0;
2087 /* dynamic ITR mode */
2088 ec->tx_coalesce_usecs = 1;
2091 ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
2099 * this function must be called before setting the new value of
/* Reconcile RSC (receive side coalescing) with the requested interrupt
 * rate: RSC cannot run above IXGBE_MAX_RSC_INT_RATE, so a too-low
 * rx-usecs disables it, while an acceptable rate re-enables it if the
 * LRO feature flag asks for it.  Returns whether a device reset is
 * needed to apply the change (return statements elided).
 */
2102 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
2103 struct ethtool_coalesce *ec)
2105 struct net_device *netdev = adapter->netdev;
/* nothing to do on hardware without RSC capability */
2107 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2110 /* if interrupt rate is too high then disable RSC */
2111 if (ec->rx_coalesce_usecs != 1 &&
2112 ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
2113 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2114 e_info(probe, "rx-usecs set too low, "
2116 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2120 /* check the feature flag value and enable RSC if necessary */
2121 if ((netdev->features & NETIF_F_LRO) &&
2122 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2123 e_info(probe, "rx-usecs set to %d, "
2124 "re-enabling RSC\n",
2125 ec->rx_coalesce_usecs);
2126 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
/* ethtool .set_coalesce: program interrupt moderation.
 *
 * rx/tx_coalesce_usecs use the same encoding ixgbe_get_coalesce()
 * reports: 0 disables throttling (EITR forced to the max rate), 1
 * selects dynamic ITR, >1 is a fixed rate validated against
 * IXGBE_MIN/MAX_INT_RATE and stored as interrupts/second in
 * *_eitr_param.  Each change path also re-evaluates RSC via
 * ixgbe_update_rsc(), which may require a reset.  Finally the new EITR
 * values are pushed to every active q_vector, and the device is
 * reinitialized if RSC state changed.
 */
2133 static int ixgbe_set_coalesce(struct net_device *netdev,
2134 struct ethtool_coalesce *ec)
2136 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2137 struct ixgbe_q_vector *q_vector;
2139 bool need_reset = false;
2141 /* don't accept tx specific changes if we've got mixed RxTx vectors */
2142 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
2143 && ec->tx_coalesce_usecs)
2146 if (ec->tx_max_coalesced_frames_irq)
2147 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2149 if (ec->rx_coalesce_usecs > 1) {
2150 /* check the limits */
2151 if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2152 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2155 /* check the old value and enable RSC if necessary */
2156 need_reset = ixgbe_update_rsc(adapter, ec);
2158 /* store the value in ints/second */
2159 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2161 /* static value of interrupt rate */
2162 adapter->rx_itr_setting = adapter->rx_eitr_param;
2163 /* clear the lower bit as its used for dynamic state */
2164 adapter->rx_itr_setting &= ~1;
2165 } else if (ec->rx_coalesce_usecs == 1) {
2166 /* check the old value and enable RSC if necessary */
2167 need_reset = ixgbe_update_rsc(adapter, ec);
2169 /* 1 means dynamic mode */
2170 adapter->rx_eitr_param = 20000;
2171 adapter->rx_itr_setting = 1;
2173 /* check the old value and enable RSC if necessary */
2174 need_reset = ixgbe_update_rsc(adapter, ec);
2176 * any other value means disable eitr, which is best
2177 * served by setting the interrupt rate very high
2179 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2180 adapter->rx_itr_setting = 0;
2183 if (ec->tx_coalesce_usecs > 1) {
2185 * don't have to worry about max_int as above because
2186 * tx vectors don't do hardware RSC (an rx function)
2188 /* check the limits */
2189 if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2190 (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2193 /* store the value in ints/second */
2194 adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
2196 /* static value of interrupt rate */
2197 adapter->tx_itr_setting = adapter->tx_eitr_param;
2199 /* clear the lower bit as its used for dynamic state */
2200 adapter->tx_itr_setting &= ~1;
2201 } else if (ec->tx_coalesce_usecs == 1) {
2202 /* 1 means dynamic mode */
2203 adapter->tx_eitr_param = 10000;
2204 adapter->tx_itr_setting = 1;
2206 adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
2207 adapter->tx_itr_setting = 0;
2210 /* MSI/MSIx Interrupt Mode */
2211 if (adapter->flags &
2212 (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
2213 int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2214 for (i = 0; i < num_vectors; i++) {
2215 q_vector = adapter->q_vector[i];
/* Tx-only vectors take the Tx rate; Rx and mixed take the Rx rate */
2216 if (q_vector->txr_count && !q_vector->rxr_count)
2218 q_vector->eitr = adapter->tx_eitr_param;
2220 /* rx only or mixed */
2221 q_vector->eitr = adapter->rx_eitr_param;
2222 ixgbe_write_eitr(q_vector);
2224 /* Legacy Interrupt Mode */
2226 q_vector = adapter->q_vector[0];
2227 q_vector->eitr = adapter->rx_eitr_param;
2228 ixgbe_write_eitr(q_vector);
2232 * do reset here at the end to make sure EITR==0 case is handled
2233 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2234 * also locks in RSC enable/disable which requires reset
2237 if (netif_running(netdev))
2238 ixgbe_reinit_locked(adapter);
2240 ixgbe_reset(adapter);
2246 static int ixgbe_set_flags(struct net_device *netdev, u32 data)
/*
 * ethtool .set_flags handler: validate the requested netdev feature-flag
 * combination (LRO, n-tuple, RX/TX VLAN accel, RX hashing), apply it via
 * ethtool_op_set_flags(), sync adapter->flags/flags2 with the result, and
 * reset/reinit the adapter when the hardware state must change.
 *
 * NOTE(review): this view of the file is line-sampled; several statements
 * (early returns, braces, else arms) are not visible. Comments below only
 * describe what the visible lines establish.
 */
2248 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2249 bool need_reset = false;
2252 #ifdef CONFIG_IXGBE_DCB
/*
 * With DCB enabled, clearing ETH_FLAG_RXVLAN is rejected — presumably an
 * early error return follows this condition (sampled out; confirm against
 * full source).
 */
2253 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
2254 !(data & ETH_FLAG_RXVLAN))
/* A reset is needed whenever the requested RXVLAN state differs from the
 * current NETIF_F_HW_VLAN_RX feature bit. */
2258 need_reset = (data & ETH_FLAG_RXVLAN) !=
2259 (netdev->features & NETIF_F_HW_VLAN_RX);
/* RX hashing (ETH_FLAG_RXHASH) is only meaningful when RSS is enabled;
 * the rejection path for this check is sampled out of view. */
2261 if ((data & ETH_FLAG_RXHASH) &&
2262 !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
/* Let the generic helper validate/apply the supported flag set; the tail
 * of the supported-flags mask (and the rc error check) is not visible. */
2265 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
2266 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
2271 /* if state changes we need to update adapter->flags and reset */
/* LRO maps onto hardware RSC: only act when the adapter is RSC-capable
 * and the requested LRO state differs from the current RSC-enabled bit. */
2272 if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
2273 (!!(data & ETH_FLAG_LRO) !=
2274 !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
/* RSC cannot be enabled if interrupt throttling is off (rx_itr_setting
 * == 0) or the interrupt rate exceeds IXGBE_MAX_RSC_INT_RATE. */
2275 if ((data & ETH_FLAG_LRO) &&
2276 (!adapter->rx_itr_setting ||
2277 (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
2278 e_info(probe, "rx-usecs set too low, "
2279 "not enabling RSC.\n");
/* Toggle the RSC-enabled bit (XOR flips it from the old state). */
2281 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2282 switch (adapter->hw.mac.type) {
2283 case ixgbe_mac_82599EB:
/* 82599 handling is sampled out; X540 path below reprograms each RX
 * ring's RSC control register in place instead of resetting. */
2286 case ixgbe_mac_X540: {
2288 for (i = 0; i < adapter->num_rx_queues; i++) {
2289 struct ixgbe_ring *ring =
2290 adapter->rx_ring[i];
2291 if (adapter->flags2 &
2292 IXGBE_FLAG2_RSC_ENABLED) {
2293 ixgbe_configure_rscctl(adapter,
/* else branch: RSC was just disabled, clear per-ring RSC control. */
2296 ixgbe_clear_rscctl(adapter,
2309 * Check if Flow Director n-tuple support was enabled or disabled. If
2310 * the state changed, we need to reset.
2312 if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
2313 (!(data & ETH_FLAG_NTUPLE))) {
2314 /* turn off Flow Director perfect, set hash and reset */
2315 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
2316 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
2318 } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
2319 (data & ETH_FLAG_NTUPLE)) {
/* Perfect-filter and hash-based Flow Director are mutually exclusive:
 * enabling n-tuple switches from hash mode to perfect mode. */
2320 /* turn off Flow Director hash, enable perfect and reset */
2321 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
2322 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
2325 /* no state change */
/* Apply the accumulated need_reset: full reinit while running, plain
 * hardware reset otherwise (guarding if(need_reset) is sampled out). */
2329 if (netif_running(netdev))
2330 ixgbe_reinit_locked(adapter);
2332 ixgbe_reset(adapter);
2338 static int ixgbe_set_rx_ntuple(struct net_device *dev,
2339 struct ethtool_rx_ntuple *cmd)
/*
 * ethtool .set_rx_ntuple handler: translate a user-supplied n-tuple flow
 * spec into an 82599 Flow Director "perfect filter" (value + mask pair)
 * and program it into the hardware.
 *
 * Mask convention: ethtool masks are "1 = ignore this bit" while the
 * ixgbe_atr mask structures use "1 = match this bit", hence the bitwise
 * negations (~) throughout when copying user masks.
 *
 * Returns 0 on success, -1 if the hardware rejected the filter; other
 * early-exit error paths exist but are sampled out of this view.
 *
 * NOTE(review): this view of the file is line-sampled; case labels,
 * braces and some returns are not visible.
 */
2341 struct ixgbe_adapter *adapter = netdev_priv(dev);
2342 struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
2343 union ixgbe_atr_input input_struct;
2344 struct ixgbe_atr_input_masks input_masks;
/* Flow Director perfect filters are not supported on 82598 hardware;
 * the rejection return is sampled out. */
2348 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2352 * Don't allow programming if the action is a queue greater than
2353 * the number of online Tx queues.
2355 if ((fs->action >= adapter->num_tx_queues) ||
2356 (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
/* Start from all-zero filter value and mask structures. */
2359 memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
2360 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
2362 /* record flow type */
/* Map the ethtool flow type to the ATR flow-type encoding; the case
 * labels (IP_USER_FLOW, TCP_V4_FLOW, ...) are sampled out of view. */
2363 switch (fs->flow_type) {
2365 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2368 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2371 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2374 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2380 /* copy vlan tag minus the CFI bit */
/* 0xEFFF keeps the 12-bit VLAN ID plus priority bits, dropping bit 12
 * (the CFI bit) from both the tag and the mask. */
2381 if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
2382 input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
/* A zero user mask means "match the whole tag": match all non-CFI bits. */
2383 if (!fs->vlan_tag_mask) {
2384 input_masks.vlan_id_mask = htons(0xEFFF);
/* Otherwise only a fixed set of whole-field masks is programmable;
 * the valid case labels are sampled out of view. */
2386 switch (~fs->vlan_tag_mask & 0xEFFF) {
2387 /* all of these are valid vlan-mask values */
2392 input_masks.vlan_id_mask =
2393 htons(~fs->vlan_tag_mask);
2395 /* exit with error if vlan-mask is invalid */
2397 e_err(drv, "Partial VLAN ID or "
2398 "priority mask in vlan-mask is not "
2399 "supported by hardware\n");
2405 /* make sure we only use the first 2 bytes of user data */
/* The hardware flex-bytes field is 16 bits: either the whole 16-bit
 * user-data mask is used or none of it — partial masks are rejected. */
2406 if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
2407 input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
2408 if (!(fs->data_mask & 0xFFFF)) {
2409 input_masks.flex_mask = 0xFFFF;
2410 } else if (~fs->data_mask & 0xFFFF) {
2411 e_err(drv, "Partial user-def-mask is not "
2412 "supported by hardware\n");
2418 * Copy input into formatted structures
2420 * These assignments are based on the following logic
2421 * If neither input or mask are set assume value is masked out.
2422 * If input is set, but mask is not mask should default to accept all.
2423 * If input is not set, but mask is set then mask likely results in 0.
2424 * If input is set and mask is set then assign both.
2426 if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
2427 input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
2428 if (!fs->m_u.tcp_ip4_spec.ip4src)
2429 input_masks.src_ip_mask[0] = 0xFFFFFFFF;
2431 input_masks.src_ip_mask[0] =
2432 ~fs->m_u.tcp_ip4_spec.ip4src;
/* Same value/mask logic repeated for destination IP, then source and
 * destination L4 ports. */
2434 if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
2435 input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
2436 if (!fs->m_u.tcp_ip4_spec.ip4dst)
2437 input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
2439 input_masks.dst_ip_mask[0] =
2440 ~fs->m_u.tcp_ip4_spec.ip4dst;
2442 if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
2443 input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
2444 if (!fs->m_u.tcp_ip4_spec.psrc)
2445 input_masks.src_port_mask = 0xFFFF;
2447 input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
2449 if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
2450 input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
2451 if (!fs->m_u.tcp_ip4_spec.pdst)
2452 input_masks.dst_port_mask = 0xFFFF;
2454 input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
2457 /* determine if we need to drop or route the packet */
/* DROP is encoded as steering to the last RX queue index. */
2458 if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
2459 target_queue = MAX_RX_QUEUES - 1;
2461 target_queue = fs->action;
/* Serialize perfect-filter programming against other contexts updating
 * the Flow Director tables. */
2463 spin_lock(&adapter->fdir_perfect_lock);
2464 err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
2468 spin_unlock(&adapter->fdir_perfect_lock);
2470 return err ? -1 : 0;
/*
 * ethtool callback table for the ixgbe driver. Generic helpers
 * (ethtool_op_*) are used where no hardware-specific handling is needed;
 * everything else dispatches to the ixgbe_* handlers defined in this file.
 */
2473 static const struct ethtool_ops ixgbe_ethtool_ops = {
2474 .get_settings = ixgbe_get_settings,
2475 .set_settings = ixgbe_set_settings,
2476 .get_drvinfo = ixgbe_get_drvinfo,
2477 .get_regs_len = ixgbe_get_regs_len,
2478 .get_regs = ixgbe_get_regs,
2479 .get_wol = ixgbe_get_wol,
2480 .set_wol = ixgbe_set_wol,
2481 .nway_reset = ixgbe_nway_reset,
2482 .get_link = ethtool_op_get_link,
2483 .get_eeprom_len = ixgbe_get_eeprom_len,
2484 .get_eeprom = ixgbe_get_eeprom,
2485 .get_ringparam = ixgbe_get_ringparam,
2486 .set_ringparam = ixgbe_set_ringparam,
2487 .get_pauseparam = ixgbe_get_pauseparam,
2488 .set_pauseparam = ixgbe_set_pauseparam,
2489 .get_rx_csum = ixgbe_get_rx_csum,
2490 .set_rx_csum = ixgbe_set_rx_csum,
2491 .get_tx_csum = ixgbe_get_tx_csum,
2492 .set_tx_csum = ixgbe_set_tx_csum,
2493 .get_sg = ethtool_op_get_sg,
2494 .set_sg = ethtool_op_set_sg,
2495 .get_msglevel = ixgbe_get_msglevel,
2496 .set_msglevel = ixgbe_set_msglevel,
2497 .get_tso = ethtool_op_get_tso,
2498 .set_tso = ixgbe_set_tso,
2499 .self_test = ixgbe_diag_test,
2500 .get_strings = ixgbe_get_strings,
2501 .set_phys_id = ixgbe_set_phys_id,
2502 .get_sset_count = ixgbe_get_sset_count,
2503 .get_ethtool_stats = ixgbe_get_ethtool_stats,
2504 .get_coalesce = ixgbe_get_coalesce,
2505 .set_coalesce = ixgbe_set_coalesce,
2506 .get_flags = ethtool_op_get_flags,
2507 .set_flags = ixgbe_set_flags,
2508 .set_rx_ntuple = ixgbe_set_rx_ntuple,
2511 void ixgbe_set_ethtool_ops(struct net_device *netdev)
2513 SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);