// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/ethtool.h>

#include "ena_netdev.h"
12 char name[ETH_GSTRING_LEN];
/* Build an ena_stats entry for an admin-queue counter. The offset is
 * expressed in u64 units so it can be used directly as an index into
 * the stats structure viewed as a u64 array.
 */
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
}

/* Generic entry builder for driver-side per-queue/device counters. */
#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \
}

/* Entry builder for device (admin-reported) hardware counters. */
#define ENA_STAT_HW_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_admin_##stat_type, stat) / sizeof(u64) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_HW_ENTRY(stat, eni_stats)
43 static const struct ena_stats ena_stats_global_strings[] = {
44 ENA_STAT_GLOBAL_ENTRY(tx_timeout),
45 ENA_STAT_GLOBAL_ENTRY(suspend),
46 ENA_STAT_GLOBAL_ENTRY(resume),
47 ENA_STAT_GLOBAL_ENTRY(wd_expired),
48 ENA_STAT_GLOBAL_ENTRY(interface_up),
49 ENA_STAT_GLOBAL_ENTRY(interface_down),
50 ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
53 static const struct ena_stats ena_stats_eni_strings[] = {
54 ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
55 ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
56 ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
57 ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
58 ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
61 static const struct ena_stats ena_stats_tx_strings[] = {
62 ENA_STAT_TX_ENTRY(cnt),
63 ENA_STAT_TX_ENTRY(bytes),
64 ENA_STAT_TX_ENTRY(queue_stop),
65 ENA_STAT_TX_ENTRY(queue_wakeup),
66 ENA_STAT_TX_ENTRY(dma_mapping_err),
67 ENA_STAT_TX_ENTRY(linearize),
68 ENA_STAT_TX_ENTRY(linearize_failed),
69 ENA_STAT_TX_ENTRY(napi_comp),
70 ENA_STAT_TX_ENTRY(tx_poll),
71 ENA_STAT_TX_ENTRY(doorbells),
72 ENA_STAT_TX_ENTRY(prepare_ctx_err),
73 ENA_STAT_TX_ENTRY(bad_req_id),
74 ENA_STAT_TX_ENTRY(llq_buffer_copy),
75 ENA_STAT_TX_ENTRY(missed_tx),
76 ENA_STAT_TX_ENTRY(unmask_interrupt),
79 static const struct ena_stats ena_stats_rx_strings[] = {
80 ENA_STAT_RX_ENTRY(cnt),
81 ENA_STAT_RX_ENTRY(bytes),
82 ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
83 ENA_STAT_RX_ENTRY(csum_good),
84 ENA_STAT_RX_ENTRY(refil_partial),
85 ENA_STAT_RX_ENTRY(bad_csum),
86 ENA_STAT_RX_ENTRY(page_alloc_fail),
87 ENA_STAT_RX_ENTRY(skb_alloc_fail),
88 ENA_STAT_RX_ENTRY(dma_mapping_err),
89 ENA_STAT_RX_ENTRY(bad_desc_num),
90 ENA_STAT_RX_ENTRY(bad_req_id),
91 ENA_STAT_RX_ENTRY(empty_rx_ring),
92 ENA_STAT_RX_ENTRY(csum_unchecked),
93 ENA_STAT_RX_ENTRY(xdp_aborted),
94 ENA_STAT_RX_ENTRY(xdp_drop),
95 ENA_STAT_RX_ENTRY(xdp_pass),
96 ENA_STAT_RX_ENTRY(xdp_tx),
97 ENA_STAT_RX_ENTRY(xdp_invalid),
98 ENA_STAT_RX_ENTRY(xdp_redirect),
101 static const struct ena_stats ena_stats_ena_com_strings[] = {
102 ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
103 ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
104 ENA_STAT_ENA_COM_ENTRY(completed_cmd),
105 ENA_STAT_ENA_COM_ENTRY(out_of_space),
106 ENA_STAT_ENA_COM_ENTRY(no_completion),
#define ENA_STATS_ARRAY_GLOBAL		ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX		ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX		ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM		ARRAY_SIZE(ena_stats_ena_com_strings)
/* ENI stats count is zero when the device does not support them:
 * eni_stats_supported acts as a 0/1 multiplier.
 */
#define ENA_STATS_ARRAY_ENI(adapter)	\
	(ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)
116 static void ena_safe_update_stat(u64 *src, u64 *dst,
117 struct u64_stats_sync *syncp)
122 start = u64_stats_fetch_begin_irq(syncp);
124 } while (u64_stats_fetch_retry_irq(syncp, start));
127 static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
129 const struct ena_stats *ena_stats;
130 struct ena_ring *ring;
135 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
137 ring = &adapter->tx_ring[i];
139 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
140 ena_stats = &ena_stats_tx_strings[j];
142 ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset;
144 ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
146 /* XDP TX queues don't have a RX queue counterpart */
147 if (!ENA_IS_XDP_INDEX(adapter, i)) {
149 ring = &adapter->rx_ring[i];
151 for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
152 ena_stats = &ena_stats_rx_strings[j];
154 ptr = (u64 *)&ring->rx_stats +
155 ena_stats->stat_offset;
157 ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
163 static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
165 const struct ena_stats *ena_stats;
169 for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
170 ena_stats = &ena_stats_ena_com_strings[i];
172 ptr = (u64 *)&adapter->ena_dev->admin_queue.stats +
173 ena_stats->stat_offset;
179 static void ena_get_stats(struct ena_adapter *adapter,
181 bool eni_stats_needed)
183 const struct ena_stats *ena_stats;
187 for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
188 ena_stats = &ena_stats_global_strings[i];
190 ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset;
192 ena_safe_update_stat(ptr, data++, &adapter->syncp);
195 if (eni_stats_needed) {
196 ena_update_hw_stats(adapter);
197 for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
198 ena_stats = &ena_stats_eni_strings[i];
200 ptr = (u64 *)&adapter->eni_stats +
201 ena_stats->stat_offset;
203 ena_safe_update_stat(ptr, data++, &adapter->syncp);
207 ena_queue_stats(adapter, &data);
208 ena_dev_admin_queue_stats(adapter, &data);
211 static void ena_get_ethtool_stats(struct net_device *netdev,
212 struct ethtool_stats *stats,
215 struct ena_adapter *adapter = netdev_priv(netdev);
217 ena_get_stats(adapter, data, adapter->eni_stats_supported);
220 static int ena_get_sw_stats_count(struct ena_adapter *adapter)
222 return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
223 + adapter->xdp_num_queues * ENA_STATS_ARRAY_TX
224 + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
/* Number of hardware (device-reported ENI) statistics entries;
 * zero when the device does not support ENI stats.
 */
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
	return ENA_STATS_ARRAY_ENI(adapter);
}
232 int ena_get_sset_count(struct net_device *netdev, int sset)
234 struct ena_adapter *adapter = netdev_priv(netdev);
236 if (sset != ETH_SS_STATS)
239 return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter);
242 static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
244 const struct ena_stats *ena_stats;
248 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
249 is_xdp = ENA_IS_XDP_INDEX(adapter, i);
251 for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
252 ena_stats = &ena_stats_tx_strings[j];
254 ethtool_sprintf(data,
256 is_xdp ? "xdp_tx" : "tx",
261 /* RX stats, in XDP there isn't a RX queue
264 for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
265 ena_stats = &ena_stats_rx_strings[j];
267 ethtool_sprintf(data,
275 static void ena_com_dev_strings(u8 **data)
277 const struct ena_stats *ena_stats;
280 for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
281 ena_stats = &ena_stats_ena_com_strings[i];
283 ethtool_sprintf(data,
284 "ena_admin_q_%s", ena_stats->name);
288 static void ena_get_strings(struct ena_adapter *adapter,
290 bool eni_stats_needed)
292 const struct ena_stats *ena_stats;
295 for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
296 ena_stats = &ena_stats_global_strings[i];
297 ethtool_sprintf(&data, ena_stats->name);
300 if (eni_stats_needed) {
301 for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
302 ena_stats = &ena_stats_eni_strings[i];
303 ethtool_sprintf(&data, ena_stats->name);
307 ena_queue_strings(adapter, &data);
308 ena_com_dev_strings(&data);
311 static void ena_get_ethtool_strings(struct net_device *netdev,
315 struct ena_adapter *adapter = netdev_priv(netdev);
317 if (sset != ETH_SS_STATS)
320 ena_get_strings(adapter, data, adapter->eni_stats_supported);
323 static int ena_get_link_ksettings(struct net_device *netdev,
324 struct ethtool_link_ksettings *link_ksettings)
326 struct ena_adapter *adapter = netdev_priv(netdev);
327 struct ena_com_dev *ena_dev = adapter->ena_dev;
328 struct ena_admin_get_feature_link_desc *link;
329 struct ena_admin_get_feat_resp feat_resp;
332 rc = ena_com_get_link_params(ena_dev, &feat_resp);
336 link = &feat_resp.u.link;
337 link_ksettings->base.speed = link->speed;
339 if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
340 ethtool_link_ksettings_add_link_mode(link_ksettings,
342 ethtool_link_ksettings_add_link_mode(link_ksettings,
346 link_ksettings->base.autoneg =
347 (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
348 AUTONEG_ENABLE : AUTONEG_DISABLE;
350 link_ksettings->base.duplex = DUPLEX_FULL;
355 static int ena_get_coalesce(struct net_device *net_dev,
356 struct ethtool_coalesce *coalesce)
358 struct ena_adapter *adapter = netdev_priv(net_dev);
359 struct ena_com_dev *ena_dev = adapter->ena_dev;
361 if (!ena_com_interrupt_moderation_supported(ena_dev))
364 coalesce->tx_coalesce_usecs =
365 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
366 ena_dev->intr_delay_resolution;
368 coalesce->rx_coalesce_usecs =
369 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
370 * ena_dev->intr_delay_resolution;
372 coalesce->use_adaptive_rx_coalesce =
373 ena_com_get_adaptive_moderation_enabled(ena_dev);
378 static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
383 val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);
385 for (i = 0; i < adapter->num_io_queues; i++)
386 adapter->tx_ring[i].smoothed_interval = val;
389 static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
394 val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);
396 for (i = 0; i < adapter->num_io_queues; i++)
397 adapter->rx_ring[i].smoothed_interval = val;
400 static int ena_set_coalesce(struct net_device *net_dev,
401 struct ethtool_coalesce *coalesce)
403 struct ena_adapter *adapter = netdev_priv(net_dev);
404 struct ena_com_dev *ena_dev = adapter->ena_dev;
407 if (!ena_com_interrupt_moderation_supported(ena_dev))
410 rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
411 coalesce->tx_coalesce_usecs);
415 ena_update_tx_rings_nonadaptive_intr_moderation(adapter);
417 rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
418 coalesce->rx_coalesce_usecs);
422 ena_update_rx_rings_nonadaptive_intr_moderation(adapter);
424 if (coalesce->use_adaptive_rx_coalesce &&
425 !ena_com_get_adaptive_moderation_enabled(ena_dev))
426 ena_com_enable_adaptive_moderation(ena_dev);
428 if (!coalesce->use_adaptive_rx_coalesce &&
429 ena_com_get_adaptive_moderation_enabled(ena_dev))
430 ena_com_disable_adaptive_moderation(ena_dev);
435 static u32 ena_get_msglevel(struct net_device *netdev)
437 struct ena_adapter *adapter = netdev_priv(netdev);
439 return adapter->msg_enable;
442 static void ena_set_msglevel(struct net_device *netdev, u32 value)
444 struct ena_adapter *adapter = netdev_priv(netdev);
446 adapter->msg_enable = value;
449 static void ena_get_drvinfo(struct net_device *dev,
450 struct ethtool_drvinfo *info)
452 struct ena_adapter *adapter = netdev_priv(dev);
454 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
455 strlcpy(info->bus_info, pci_name(adapter->pdev),
456 sizeof(info->bus_info));
459 static void ena_get_ringparam(struct net_device *netdev,
460 struct ethtool_ringparam *ring)
462 struct ena_adapter *adapter = netdev_priv(netdev);
464 ring->tx_max_pending = adapter->max_tx_ring_size;
465 ring->rx_max_pending = adapter->max_rx_ring_size;
466 ring->tx_pending = adapter->tx_ring[0].ring_size;
467 ring->rx_pending = adapter->rx_ring[0].ring_size;
470 static int ena_set_ringparam(struct net_device *netdev,
471 struct ethtool_ringparam *ring)
473 struct ena_adapter *adapter = netdev_priv(netdev);
474 u32 new_tx_size, new_rx_size;
476 new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
477 ENA_MIN_RING_SIZE : ring->tx_pending;
478 new_tx_size = rounddown_pow_of_two(new_tx_size);
480 new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
481 ENA_MIN_RING_SIZE : ring->rx_pending;
482 new_rx_size = rounddown_pow_of_two(new_rx_size);
484 if (new_tx_size == adapter->requested_tx_ring_size &&
485 new_rx_size == adapter->requested_rx_ring_size)
488 return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
491 static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
495 if (hash_fields & ENA_ADMIN_RSS_L2_DA)
498 if (hash_fields & ENA_ADMIN_RSS_L3_DA)
501 if (hash_fields & ENA_ADMIN_RSS_L3_SA)
504 if (hash_fields & ENA_ADMIN_RSS_L4_DP)
505 data |= RXH_L4_B_2_3;
507 if (hash_fields & ENA_ADMIN_RSS_L4_SP)
508 data |= RXH_L4_B_0_1;
513 static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
517 if (hash_fields & RXH_L2DA)
518 data |= ENA_ADMIN_RSS_L2_DA;
520 if (hash_fields & RXH_IP_DST)
521 data |= ENA_ADMIN_RSS_L3_DA;
523 if (hash_fields & RXH_IP_SRC)
524 data |= ENA_ADMIN_RSS_L3_SA;
526 if (hash_fields & RXH_L4_B_2_3)
527 data |= ENA_ADMIN_RSS_L4_DP;
529 if (hash_fields & RXH_L4_B_0_1)
530 data |= ENA_ADMIN_RSS_L4_SP;
535 static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
536 struct ethtool_rxnfc *cmd)
538 enum ena_admin_flow_hash_proto proto;
544 switch (cmd->flow_type) {
546 proto = ENA_ADMIN_RSS_TCP4;
549 proto = ENA_ADMIN_RSS_UDP4;
552 proto = ENA_ADMIN_RSS_TCP6;
555 proto = ENA_ADMIN_RSS_UDP6;
558 proto = ENA_ADMIN_RSS_IP4;
561 proto = ENA_ADMIN_RSS_IP6;
564 proto = ENA_ADMIN_RSS_NOT_IP;
577 rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
581 cmd->data = ena_flow_hash_to_flow_type(hash_fields);
586 static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
587 struct ethtool_rxnfc *cmd)
589 enum ena_admin_flow_hash_proto proto;
592 switch (cmd->flow_type) {
594 proto = ENA_ADMIN_RSS_TCP4;
597 proto = ENA_ADMIN_RSS_UDP4;
600 proto = ENA_ADMIN_RSS_TCP6;
603 proto = ENA_ADMIN_RSS_UDP6;
606 proto = ENA_ADMIN_RSS_IP4;
609 proto = ENA_ADMIN_RSS_IP6;
612 proto = ENA_ADMIN_RSS_NOT_IP;
625 hash_fields = ena_flow_data_to_flow_hash(cmd->data);
627 return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
630 static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
632 struct ena_adapter *adapter = netdev_priv(netdev);
637 rc = ena_set_rss_hash(adapter->ena_dev, info);
639 case ETHTOOL_SRXCLSRLDEL:
640 case ETHTOOL_SRXCLSRLINS:
642 netif_err(adapter, drv, netdev,
643 "Command parameter %d is not supported\n", info->cmd);
650 static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
653 struct ena_adapter *adapter = netdev_priv(netdev);
657 case ETHTOOL_GRXRINGS:
658 info->data = adapter->num_io_queues;
662 rc = ena_get_rss_hash(adapter->ena_dev, info);
664 case ETHTOOL_GRXCLSRLCNT:
665 case ETHTOOL_GRXCLSRULE:
666 case ETHTOOL_GRXCLSRLALL:
668 netif_err(adapter, drv, netdev,
669 "Command parameter %d is not supported\n", info->cmd);
676 static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
678 return ENA_RX_RSS_TABLE_SIZE;
681 static u32 ena_get_rxfh_key_size(struct net_device *netdev)
683 return ENA_HASH_KEY_SIZE;
686 static int ena_indirection_table_set(struct ena_adapter *adapter,
689 struct ena_com_dev *ena_dev = adapter->ena_dev;
692 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
693 rc = ena_com_indirect_table_fill_entry(ena_dev,
695 ENA_IO_RXQ_IDX(indir[i]));
697 netif_err(adapter, drv, adapter->netdev,
698 "Cannot fill indirect table (index is too large)\n");
703 rc = ena_com_indirect_table_set(ena_dev);
705 netif_err(adapter, drv, adapter->netdev,
706 "Cannot set indirect table\n");
707 return rc == -EPERM ? -EOPNOTSUPP : rc;
712 static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
714 struct ena_com_dev *ena_dev = adapter->ena_dev;
720 rc = ena_com_indirect_table_get(ena_dev, indir);
724 /* Our internal representation of the indices is: even indices
725 * for Tx and uneven indices for Rx. We need to convert the Rx
726 * indices to be consecutive
728 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
729 indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
734 static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
737 struct ena_adapter *adapter = netdev_priv(netdev);
738 enum ena_admin_hash_functions ena_func;
742 rc = ena_indirection_table_get(adapter, indir);
746 /* We call this function in order to check if the device
747 * supports getting/setting the hash function.
749 rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
751 if (rc == -EOPNOTSUPP)
757 rc = ena_com_get_hash_key(adapter->ena_dev, key);
762 case ENA_ADMIN_TOEPLITZ:
763 func = ETH_RSS_HASH_TOP;
765 case ENA_ADMIN_CRC32:
766 func = ETH_RSS_HASH_CRC32;
769 netif_err(adapter, drv, netdev,
770 "Command parameter is not supported\n");
780 static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
781 const u8 *key, const u8 hfunc)
783 struct ena_adapter *adapter = netdev_priv(netdev);
784 struct ena_com_dev *ena_dev = adapter->ena_dev;
785 enum ena_admin_hash_functions func = 0;
789 rc = ena_indirection_table_set(adapter, indir);
795 case ETH_RSS_HASH_NO_CHANGE:
796 func = ena_com_get_current_hash_function(ena_dev);
798 case ETH_RSS_HASH_TOP:
799 func = ENA_ADMIN_TOEPLITZ;
801 case ETH_RSS_HASH_CRC32:
802 func = ENA_ADMIN_CRC32;
805 netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
811 rc = ena_com_fill_hash_function(ena_dev, func, key,
815 netif_err(adapter, drv, netdev, "Cannot fill key\n");
816 return rc == -EPERM ? -EOPNOTSUPP : rc;
823 static void ena_get_channels(struct net_device *netdev,
824 struct ethtool_channels *channels)
826 struct ena_adapter *adapter = netdev_priv(netdev);
828 channels->max_combined = adapter->max_num_io_queues;
829 channels->combined_count = adapter->num_io_queues;
832 static int ena_set_channels(struct net_device *netdev,
833 struct ethtool_channels *channels)
835 struct ena_adapter *adapter = netdev_priv(netdev);
836 u32 count = channels->combined_count;
837 /* The check for max value is already done in ethtool */
838 if (count < ENA_MIN_NUM_IO_QUEUES ||
839 (ena_xdp_present(adapter) &&
840 !ena_xdp_legal_queue_count(adapter, count)))
843 return ena_update_queue_count(adapter, count);
846 static int ena_get_tunable(struct net_device *netdev,
847 const struct ethtool_tunable *tuna, void *data)
849 struct ena_adapter *adapter = netdev_priv(netdev);
853 case ETHTOOL_RX_COPYBREAK:
854 *(u32 *)data = adapter->rx_copybreak;
864 static int ena_set_tunable(struct net_device *netdev,
865 const struct ethtool_tunable *tuna,
868 struct ena_adapter *adapter = netdev_priv(netdev);
873 case ETHTOOL_RX_COPYBREAK:
875 if (len > adapter->netdev->mtu) {
879 adapter->rx_copybreak = len;
889 static const struct ethtool_ops ena_ethtool_ops = {
890 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
891 ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
892 .get_link_ksettings = ena_get_link_ksettings,
893 .get_drvinfo = ena_get_drvinfo,
894 .get_msglevel = ena_get_msglevel,
895 .set_msglevel = ena_set_msglevel,
896 .get_link = ethtool_op_get_link,
897 .get_coalesce = ena_get_coalesce,
898 .set_coalesce = ena_set_coalesce,
899 .get_ringparam = ena_get_ringparam,
900 .set_ringparam = ena_set_ringparam,
901 .get_sset_count = ena_get_sset_count,
902 .get_strings = ena_get_ethtool_strings,
903 .get_ethtool_stats = ena_get_ethtool_stats,
904 .get_rxnfc = ena_get_rxnfc,
905 .set_rxnfc = ena_set_rxnfc,
906 .get_rxfh_indir_size = ena_get_rxfh_indir_size,
907 .get_rxfh_key_size = ena_get_rxfh_key_size,
908 .get_rxfh = ena_get_rxfh,
909 .set_rxfh = ena_set_rxfh,
910 .get_channels = ena_get_channels,
911 .set_channels = ena_set_channels,
912 .get_tunable = ena_get_tunable,
913 .set_tunable = ena_set_tunable,
914 .get_ts_info = ethtool_op_get_ts_info,
917 void ena_set_ethtool_ops(struct net_device *netdev)
919 netdev->ethtool_ops = &ena_ethtool_ops;
922 static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
924 struct net_device *netdev = adapter->netdev;
930 strings_num = ena_get_sw_stats_count(adapter);
931 if (strings_num <= 0) {
932 netif_err(adapter, drv, netdev, "Can't get stats num\n");
936 strings_buf = devm_kcalloc(&adapter->pdev->dev,
937 ETH_GSTRING_LEN, strings_num,
940 netif_err(adapter, drv, netdev,
941 "Failed to allocate strings_buf\n");
945 data_buf = devm_kcalloc(&adapter->pdev->dev,
946 strings_num, sizeof(u64),
949 netif_err(adapter, drv, netdev,
950 "Failed to allocate data buf\n");
951 devm_kfree(&adapter->pdev->dev, strings_buf);
955 ena_get_strings(adapter, strings_buf, false);
956 ena_get_stats(adapter, data_buf, false);
958 /* If there is a buffer, dump stats, otherwise print them to dmesg */
960 for (i = 0; i < strings_num; i++) {
961 rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
963 strings_buf + i * ETH_GSTRING_LEN,
968 for (i = 0; i < strings_num; i++)
969 netif_err(adapter, drv, netdev, "%s: %llu\n",
970 strings_buf + i * ETH_GSTRING_LEN,
973 devm_kfree(&adapter->pdev->dev, strings_buf);
974 devm_kfree(&adapter->pdev->dev, data_buf);
977 void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
982 ena_dump_stats_ex(adapter, buf);
985 void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
987 ena_dump_stats_ex(adapter, NULL);