/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
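
/* Illustrative note (not in the original source): a table entry such as
 * {"lsc_int", IXGBE_STAT(lsc_int)} expands to
 *
 *	{ "lsc_int", IXGBE_STATS,
 *	  sizeof(((struct ixgbe_adapter *)0)->lsc_int),
 *	  offsetof(struct ixgbe_adapter, lsc_int) },
 *
 * so every row carries a (type, size, offset) triple that lets
 * ixgbe_get_ethtool_stats() below fetch each counter generically from
 * either the adapter private struct or the rtnl_link_stats64 block.
 */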
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
119 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
120 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
121 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
122 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
123 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
124 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
125 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
126 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
127 #endif /* IXGBE_FCOE */
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
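
/* Worked example (illustrative only, assuming 16 Tx/16 Rx queues and the
 * usual 8-entry per-packet-buffer u64 counter arrays): with struct
 * ixgbe_queue_stats holding two u64 counters (packets, bytes),
 * IXGBE_QUEUE_STATS_LEN is (16 + 16) * 2 = 64 entries, and
 * IXGBE_PB_STATS_LEN is (4 arrays * 8 * sizeof(u64)) / sizeof(u64) = 32
 * entries on top of IXGBE_GLOBAL_STATS_LEN.
 */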
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};

#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)

static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
			       ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				 ADVERTISED_Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
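
/* Example usage from userspace (illustrative, not in the original source):
 * the fixed 10Gb/FULL path above corresponds to
 *
 *	ethtool -s eth0 speed 10000 duplex full autoneg off
 *
 * while the copper/multispeed-fiber path is exercised by changing the
 * advertised mask, e.g. "ethtool -s eth0 advertise 0x1000"
 * (ADVERTISED_10000baseT_Full).
 */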
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}

static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1139
	return IXGBE_REGS_LEN * sizeof(u32);
}
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_

static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); /* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); /* same as RTTQCNRR */

	/* X540 specific DCB registers */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
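
/* Worked example (illustrative, not in the original source): a request
 * with eeprom->offset = 3 and eeprom->len = 4 touches byte addresses
 * 3..6, so first_word = 1, last_word = 3 and eeprom_len = 3 words are
 * read; the odd starting offset is then handled by copying from
 * (u8 *)eeprom_buff + 1.
 */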
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					      &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
		       adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
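
/* Illustrative note (not in the original source): ixgbe_reg_test() below
 * derives the address stride from these types. For example, the
 * reg_test_82599 entry { IXGBE_RDBAL(0), 4, PATTERN_TEST, ... } walks
 * RDBAL(0)..RDBAL(3) at test->reg + i * 0x40, TABLE32_TEST uses a 4-byte
 * stride, and the TABLE64 variants step through 8-byte-wide table slots
 * (the _HI half starting at test->reg + 4).
 */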
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}
	*data = 0;
	return 0;
}
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	hw->mac.ops.disable_rx(hw);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
		break;
	default:
		break;
	}

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(adapter, rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 need to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}
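
/* Resulting layout (illustrative, not in the original source): with
 * frame_size = 1024 the whole buffer is first filled with 0xFF, bytes
 * 512..766 are then overwritten with 0xAA, and the single 0xBE/0xAF
 * marker bytes land at offsets 522 and 524 - exactly the positions
 * ixgbe_check_lbtest_frame() verifies on the Rx side.
 */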
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (tx_ntc != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *tx_desc;
		struct ixgbe_tx_buffer *tx_buffer;

		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

		/* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			return count;

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}

	while (rx_desc->wb.upper.length) {
		struct ixgbe_rx_buffer *rx_buffer;

		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
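
/* Run the loopback exchange itself: send the 1024-byte test frame in bursts
 * of 64, give the hardware 200ms to loop each burst back, and return a
 * distinct non-zero code if allocation, transmit, or receive verification
 * fails.
 */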
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue */
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}
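
/* Top-level loopback diagnostic: build the test rings, enter MAC loopback,
 * run the exchange, then tear everything down in reverse order. *data ends
 * up 0 on success or the first non-zero setup/run code.
 */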
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
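
/* ethtool self-test entry point (e.g. "ethtool -t eth0 offline"). Results
 * are reported in fixed slots: data[0] registers, data[1] eeprom, data[2]
 * interrupt, data[3] loopback, data[4] link; 0 means the subtest passed.
 */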
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;

			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		/* Offline tests */
		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic.
		 */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
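
/* Helper shared by the get/set WoL handlers: returns non-zero and clears
 * wol->supported when this device/subsystem ID combination has no
 * Wake-on-LAN support.
 */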
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}

	return false;
}
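
/* ethtool -C handler (e.g. "ethtool -C eth0 rx-usecs 50"). User values in
 * microseconds are stored shifted left by 2 to match the EITR register's
 * interval units; a stored value of 1 selects dynamic ITR. Crossing the
 * IXGBE_100K_ITR boundary changes TXDCTL.WTHRESH, and that, like toggling
 * RSC, requires a reset at the end.
 */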
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
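
/* Look up a single Flow Director rule by its ethtool location and translate
 * the stored union ixgbe_atr_input representation back into the generic
 * ethtool_rx_flow_spec layout (retrievable with "ethtool -u").
 */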
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								    sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);
	input->sw_idx = sw_idx;

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
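
/* ETHTOOL_SRXCLSRLINS handler (e.g. "ethtool -U eth0 flow-type tcp4 ...
 * action N"). Validates the target queue/VF, converts the flow spec into
 * the hardware's union ixgbe_atr_input format, programs the shared input
 * mask (only one mask is supported per port) and writes the perfect filter.
 */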
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* ring_cookie is masked into a set of queues and ixgbe pools, or
	 * we use the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
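
/* ETHTOOL_SRXFH handler (e.g. "ethtool -N eth0 rx-flow-hash udp4 sdfn").
 * The hardware can only hash on src/dst IP for all flow types plus,
 * optionally, the L4 ports: always for TCP and via a global flag for UDP,
 * so any other field combination is rejected with -EINVAL.
 */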
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
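
/* RSS redirection table and hash key handling (ethtool -x / -X). Each table
 * entry maps a hash result to an Rx queue; ixgbe_rss_indir_tbl_max() caps
 * how many queues the table may reference (16 before X550, 64 from X550 on).
 */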
static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}

static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBE_RSS_KEY_SIZE;
}

static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}

static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}

static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/* Allow at least 2 queues w/ SR-IOV. */
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];
	}

	/* Fill out the rss hash key */
	if (key) {
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	ixgbe_store_reta(adapter);

	return 0;
}
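
/* Report hardware timestamping capability (ethtool -T). X550 and newer can
 * timestamp all received packets, so HWTSTAMP_FILTER_ALL is added on top of
 * the PTP-specific filters supported by 82599/X540.
 */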
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		/* fallthrough */
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE |
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		if (adapter->ptp_clock)
			info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			BIT(HWTSTAMP_TX_OFF) |
			BIT(HWTSTAMP_TX_ON);

		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}
	return 0;
}
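
/* Channel (queue pair) accounting for ethtool -l / -L. The ceiling depends
 * on interrupt mode, SR-IOV pool masking, the DCB traffic-class layout, and
 * whether Flow Director (ATR) or plain RSS sets the upper bound.
 */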
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* Limit value based on the queue mask */
		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return max_combined;
}
static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (netdev_get_num_tc(dev) > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
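
/* SFP+ module EEPROM access (ethtool -m). First probe the SFF-8472
 * compliance and address-swap bytes over I2C to decide whether only the
 * 256-byte SFF-8079 page or the full SFF-8472 diagnostic map is readable.
 */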
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}
static const struct {
	ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};

static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
};
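
/* Query the firmware-managed PHY for EEE state: info[0] carries the link
 * partner's advertisement, mapped through ixgbe_lp_map, while the local
 * supported/advertised speeds come from the cached hw->phy.eee_speeds_*
 * fields via ixgbe_ls_map.
 */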
static int
ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
{
	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	s32 rc;
	u16 i;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
	if (rc)
		return rc;

	edata->lp_advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
		if (info[0] & ixgbe_lp_map[i].lp_advertised)
			edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
	}

	edata->supported = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
			edata->supported |= ixgbe_ls_map[i].supported;
	}

	edata->advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
			edata->advertised |= ixgbe_ls_map[i].supported;
	}

	edata->eee_enabled = !!edata->advertised;
	edata->tx_lpi_enabled = edata->eee_enabled;
	if (edata->advertised & edata->lp_advertised)
		edata->eee_active = true;

	return 0;
}
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
		return ixgbe_get_eee_fw(adapter, edata);

	return -EOPNOTSUPP;
}
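
/* ethtool --set-eee handler. Only a wholesale enable/disable is accepted:
 * tx-lpi, the LPI timer, and the advertised speed set cannot be changed
 * individually, so any such delta is rejected before toggling
 * IXGBE_FLAG2_EEE_ENABLED and resetting the link.
 */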
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ethtool_eee eee_data;
	s32 ret_val;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	memset(&eee_data, 0, sizeof(struct ethtool_eee));

	ret_val = ixgbe_get_eee(netdev, &eee_data);
	if (ret_val)
		return ret_val;

	if (eee_data.eee_enabled && !edata->eee_enabled) {
		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
			e_err(drv, "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
			e_err(drv,
			      "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (eee_data.advertised != edata->advertised) {
			e_err(drv,
			      "Setting EEE advertised speeds is not supported\n");
			return -EINVAL;
		}
	}

	if (eee_data.eee_enabled != edata->eee_enabled) {
		if (edata->eee_enabled) {
			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised =
						   hw->phy.eee_speeds_supported;
		} else {
			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised = 0;
		}

		/* reset link */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
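
/* Private flags (ethtool --show-priv-flags / --set-priv-flags) currently
 * expose a single knob selecting the legacy Rx buffer handling path;
 * flipping it reinitializes a running interface so the Rx rings are rebuilt
 * with the chosen scheme.
 */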
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
	.get_rxfh_indir_size    = ixgbe_rss_indir_size,
	.get_rxfh_key_size      = ixgbe_get_rxfh_key_size,
	.get_rxfh               = ixgbe_get_rxfh,
	.set_rxfh               = ixgbe_set_rxfh,
	.get_eee                = ixgbe_get_eee,
	.set_eee                = ixgbe_set_eee,
	.get_channels           = ixgbe_get_channels,
	.set_channels           = ixgbe_set_channels,
	.get_priv_flags         = ixgbe_get_priv_flags,
	.set_priv_flags         = ixgbe_set_priv_flags,
	.get_ts_info            = ixgbe_get_ts_info,
	.get_module_info        = ixgbe_get_module_info,
	.get_module_eeprom      = ixgbe_get_module_eeprom,
	.get_link_ksettings     = ixgbe_get_link_ksettings,
	.set_link_ksettings     = ixgbe_set_link_ksettings,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}