1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright 2013 Cisco Systems, Inc. All rights reserved.
4 #include <linux/netdevice.h>
5 #include <linux/ethtool.h>
6 #include <linux/net_tstamp.h>
11 #include "enic_clsf.h"
13 #include "vnic_stats.h"
/* NOTE(review): this chunk is a lossy extraction -- interior lines
 * (struct header, braces, .name initializers) are missing throughout.
 * Comments below describe only what is visible; verify against the
 * complete enic_ethtool.c. */
/* Visible member of the stat descriptor struct: the display name
 * reported for "ethtool -S" (enclosing struct declaration not shown). */
16 char name[ETH_GSTRING_LEN];
/* Each ENIC_*_STAT(stat) macro builds one descriptor entry; .index is
 * the u64 slot of @stat within the matching stats struct (offsetof
 * divided by sizeof(u64)), used to index the struct as a u64 array. */
20 #define ENIC_TX_STAT(stat) { \
22 .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
25 #define ENIC_RX_STAT(stat) { \
27 .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
30 #define ENIC_GEN_STAT(stat) { \
32 .index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
/* Per-queue variants: .name is a printf template ("rq[%d]_..."); the
 * queue number is filled in by snprintf() in enic_get_strings(). */
35 #define ENIC_PER_RQ_STAT(stat) { \
36 .name = "rq[%d]_"#stat, \
37 .index = offsetof(struct enic_rq_stats, stat) / sizeof(u64) \
40 #define ENIC_PER_WQ_STAT(stat) { \
41 .name = "wq[%d]_"#stat, \
42 .index = offsetof(struct enic_wq_stats, stat) / sizeof(u64) \
/* Descriptor tables enumerating which counters are exposed through
 * "ethtool -S".  Order here must match the string/value emission order
 * in enic_get_strings() and enic_get_ethtool_stats(). */
/* Per-receive-queue software counters (one copy emitted per RQ). */
45 static const struct enic_stat enic_per_rq_stats[] = {
46 ENIC_PER_RQ_STAT(l4_rss_hash),
47 ENIC_PER_RQ_STAT(l3_rss_hash),
48 ENIC_PER_RQ_STAT(csum_unnecessary_encap),
49 ENIC_PER_RQ_STAT(vlan_stripped),
50 ENIC_PER_RQ_STAT(napi_complete),
51 ENIC_PER_RQ_STAT(napi_repoll),
52 ENIC_PER_RQ_STAT(no_skb),
53 ENIC_PER_RQ_STAT(desc_skip),
56 #define NUM_ENIC_PER_RQ_STATS ARRAY_SIZE(enic_per_rq_stats)
/* Per-work-queue (TX) software counters (one copy emitted per WQ). */
58 static const struct enic_stat enic_per_wq_stats[] = {
59 ENIC_PER_WQ_STAT(encap_tso),
60 ENIC_PER_WQ_STAT(encap_csum),
61 ENIC_PER_WQ_STAT(add_vlan),
62 ENIC_PER_WQ_STAT(cq_work),
63 ENIC_PER_WQ_STAT(cq_bytes),
64 ENIC_PER_WQ_STAT(null_pkt),
65 ENIC_PER_WQ_STAT(skb_linear_fail),
66 ENIC_PER_WQ_STAT(desc_full_awake),
69 #define NUM_ENIC_PER_WQ_STATS ARRAY_SIZE(enic_per_wq_stats)
/* Device-wide TX counters read from the vNIC firmware stats dump. */
70 static const struct enic_stat enic_tx_stats[] = {
71 ENIC_TX_STAT(tx_frames_ok),
72 ENIC_TX_STAT(tx_unicast_frames_ok),
73 ENIC_TX_STAT(tx_multicast_frames_ok),
74 ENIC_TX_STAT(tx_broadcast_frames_ok),
75 ENIC_TX_STAT(tx_bytes_ok),
76 ENIC_TX_STAT(tx_unicast_bytes_ok),
77 ENIC_TX_STAT(tx_multicast_bytes_ok),
78 ENIC_TX_STAT(tx_broadcast_bytes_ok),
79 ENIC_TX_STAT(tx_drops),
80 ENIC_TX_STAT(tx_errors),
84 #define NUM_ENIC_TX_STATS ARRAY_SIZE(enic_tx_stats)
/* Device-wide RX counters, including frame-size histogram buckets. */
86 static const struct enic_stat enic_rx_stats[] = {
87 ENIC_RX_STAT(rx_frames_ok),
88 ENIC_RX_STAT(rx_frames_total),
89 ENIC_RX_STAT(rx_unicast_frames_ok),
90 ENIC_RX_STAT(rx_multicast_frames_ok),
91 ENIC_RX_STAT(rx_broadcast_frames_ok),
92 ENIC_RX_STAT(rx_bytes_ok),
93 ENIC_RX_STAT(rx_unicast_bytes_ok),
94 ENIC_RX_STAT(rx_multicast_bytes_ok),
95 ENIC_RX_STAT(rx_broadcast_bytes_ok),
96 ENIC_RX_STAT(rx_drop),
97 ENIC_RX_STAT(rx_no_bufs),
98 ENIC_RX_STAT(rx_errors),
100 ENIC_RX_STAT(rx_crc_errors),
101 ENIC_RX_STAT(rx_frames_64),
102 ENIC_RX_STAT(rx_frames_127),
103 ENIC_RX_STAT(rx_frames_255),
104 ENIC_RX_STAT(rx_frames_511),
105 ENIC_RX_STAT(rx_frames_1023),
106 ENIC_RX_STAT(rx_frames_1518),
107 ENIC_RX_STAT(rx_frames_to_max),
110 #define NUM_ENIC_RX_STATS ARRAY_SIZE(enic_rx_stats)
/* Driver-generic counters kept in software (enic->gen_stats). */
112 static const struct enic_stat enic_gen_stats[] = {
113 ENIC_GEN_STAT(dma_map_error),
116 #define NUM_ENIC_GEN_STATS ARRAY_SIZE(enic_gen_stats)
/* Program @timer as the interrupt coalescing time on every RQ's MSI-X
 * interrupt.  (Local declarations and braces are missing from this
 * extraction -- confirm against the full file.) */
118 static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
123 for (i = 0; i < enic->rq_count; i++) {
124 intr = enic_msix_rq_intr(enic, i);
125 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
/* ethtool .get_link_ksettings: the adapter is reported as a fixed
 * fibre port with autonegotiation off; speed/duplex come from the vNIC
 * when the carrier is up, otherwise "unknown".  Link-mode arguments on
 * the split lines (135/138) are not visible in this extraction. */
129 static int enic_get_ksettings(struct net_device *netdev,
130 struct ethtool_link_ksettings *ecmd)
132 struct enic *enic = netdev_priv(netdev);
133 struct ethtool_link_settings *base = &ecmd->base;
135 ethtool_link_ksettings_add_link_mode(ecmd, supported,
137 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
138 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
140 ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
141 base->port = PORT_FIBRE;
/* Carrier up: ask the vNIC for the negotiated port speed. */
143 if (netif_carrier_ok(netdev)) {
144 base->speed = vnic_dev_port_speed(enic->vdev);
145 base->duplex = DUPLEX_FULL;
/* (else branch -- link down) */
147 base->speed = SPEED_UNKNOWN;
148 base->duplex = DUPLEX_UNKNOWN;
151 base->autoneg = AUTONEG_DISABLE;
/* ethtool .get_drvinfo: fill driver name, firmware version (queried
 * from the device via enic_dev_fw_info()) and PCI bus info.  Per the
 * comment at 164-165, only a dma_alloc_coherent failure aborts early;
 * other devcmd failures fall through and report stale fw_info. */
156 static void enic_get_drvinfo(struct net_device *netdev,
157 struct ethtool_drvinfo *drvinfo)
159 struct enic *enic = netdev_priv(netdev);
160 struct vnic_devcmd_fw_info *fw_info;
163 err = enic_dev_fw_info(enic, &fw_info);
164 /* return only when dma_alloc_coherent fails in vnic_dev_fw_info
165 * For other failures, like devcmd failure, we return previously
171 strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
172 strscpy(drvinfo->fw_version, fw_info->fw_version,
173 sizeof(drvinfo->fw_version));
174 strscpy(drvinfo->bus_info, pci_name(enic->pdev),
175 sizeof(drvinfo->bus_info));
/* ethtool .get_strings: emit stat names in the fixed order
 * TX, RX, generic, then per-RQ and per-WQ (queue index substituted
 * into the "rq[%d]_"/"wq[%d]_" templates).  Must stay in lockstep with
 * enic_get_ethtool_stats() and enic_get_sset_count().  The stringset
 * switch and closing braces are not visible in this extraction. */
178 static void enic_get_strings(struct net_device *netdev, u32 stringset,
181 struct enic *enic = netdev_priv(netdev);
187 for (i = 0; i < NUM_ENIC_TX_STATS; i++) {
188 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
189 data += ETH_GSTRING_LEN;
191 for (i = 0; i < NUM_ENIC_RX_STATS; i++) {
192 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
193 data += ETH_GSTRING_LEN;
195 for (i = 0; i < NUM_ENIC_GEN_STATS; i++) {
196 memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
197 data += ETH_GSTRING_LEN;
/* Per-queue names: the template's %d receives the queue index i. */
199 for (i = 0; i < enic->rq_count; i++) {
200 for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
201 snprintf(data, ETH_GSTRING_LEN,
202 enic_per_rq_stats[j].name, i);
203 data += ETH_GSTRING_LEN;
206 for (i = 0; i < enic->wq_count; i++) {
207 for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
208 snprintf(data, ETH_GSTRING_LEN,
209 enic_per_wq_stats[j].name, i);
210 data += ETH_GSTRING_LEN;
/* ethtool .get_ringparam: report hardware max descriptor counts and the
 * currently configured RQ/WQ ring sizes from enic->config. */
217 static void enic_get_ringparam(struct net_device *netdev,
218 struct ethtool_ringparam *ring,
219 struct kernel_ethtool_ringparam *kernel_ring,
220 struct netlink_ext_ack *extack)
222 struct enic *enic = netdev_priv(netdev);
223 struct vnic_enet_config *c = &enic->config;
225 ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
226 ring->rx_pending = c->rq_desc_count;
227 ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
228 ring->tx_pending = c->wq_desc_count;
/* ethtool .set_ringparam: validate the requested RQ/WQ ring sizes,
 * align them down to multiples of 32, then rebuild the vNIC resources
 * (freeing and re-allocating them) and reopen the interface if it was
 * running.  Many lines (error returns, dev_close() call, rollback on
 * failure) are missing from this extraction -- the original caches the
 * previous rx/tx_pending at 253-254, presumably to restore them on
 * allocation failure; verify against the full file. */
231 static int enic_set_ringparam(struct net_device *netdev,
232 struct ethtool_ringparam *ring,
233 struct kernel_ethtool_ringparam *kernel_ring,
234 struct netlink_ext_ack *extack)
236 struct enic *enic = netdev_priv(netdev);
237 struct vnic_enet_config *c = &enic->config;
238 int running = netif_running(netdev);
239 unsigned int rx_pending;
240 unsigned int tx_pending;
/* Mini and jumbo rings are not supported by this hardware. */
243 if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
245 "modifying mini ring params is not supported");
248 if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
250 "modifying jumbo ring params is not supported");
/* Remember current sizes (used for rollback -- TODO confirm). */
253 rx_pending = c->rq_desc_count;
254 tx_pending = c->wq_desc_count;
255 if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
256 ring->rx_pending < ENIC_MIN_RQ_DESCS) {
257 netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
258 ring->rx_pending, ENIC_MIN_RQ_DESCS,
262 if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
263 ring->tx_pending < ENIC_MIN_WQ_DESCS) {
264 netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
265 ring->tx_pending, ENIC_MIN_WQ_DESCS,
272 ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
274 ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
/* Tear down and rebuild vNIC rings at the new sizes. */
275 enic_free_vnic_resources(enic);
276 err = enic_alloc_vnic_resources(enic);
279 "Failed to alloc vNIC resources, aborting\n");
280 enic_free_vnic_resources(enic);
283 enic_init_vnic_resources(enic);
/* Reopen the device if it was up before the resize. */
285 err = dev_open(netdev, NULL);
291 c->rq_desc_count = rx_pending;
292 c->wq_desc_count = tx_pending;
/* ethtool .get_sset_count: total number of stat strings/values =
 * fixed TX + RX + generic counters plus per-queue counters scaled by
 * the active RQ/WQ counts.  (The sset switch and return statement are
 * not visible in this extraction.) */
296 static int enic_get_sset_count(struct net_device *netdev, int sset)
298 struct enic *enic = netdev_priv(netdev);
299 unsigned int n_per_rq_stats;
300 unsigned int n_per_wq_stats;
301 unsigned int n_stats;
305 n_per_rq_stats = NUM_ENIC_PER_RQ_STATS * enic->rq_count;
306 n_per_wq_stats = NUM_ENIC_PER_WQ_STATS * enic->wq_count;
307 n_stats = NUM_ENIC_TX_STATS + NUM_ENIC_RX_STATS +
309 n_per_rq_stats + n_per_wq_stats;
/* ethtool .get_ethtool_stats: dump firmware stats, then emit counter
 * values in exactly the order enic_get_strings() emits the names:
 * TX, RX, generic, per-RQ, per-WQ.  Each stats struct is indexed as a
 * flat u64 array using the precomputed .index from the tables above. */
316 static void enic_get_ethtool_stats(struct net_device *netdev,
317 struct ethtool_stats *stats, u64 *data)
319 struct enic *enic = netdev_priv(netdev);
320 struct vnic_stats *vstats;
325 err = enic_dev_stats_dump(enic, &vstats);
326 /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
327 * For other failures, like devcmd failure, we return previously
333 for (i = 0; i < NUM_ENIC_TX_STATS; i++)
334 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
335 for (i = 0; i < NUM_ENIC_RX_STATS; i++)
336 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
337 for (i = 0; i < NUM_ENIC_GEN_STATS; i++)
338 *(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
/* Per-queue software counters, one run of values per queue. */
339 for (i = 0; i < enic->rq_count; i++) {
340 struct enic_rq_stats *rqstats = &enic->rq[i].stats;
343 for (j = 0; j < NUM_ENIC_PER_RQ_STATS; j++) {
344 index = enic_per_rq_stats[j].index;
345 *(data++) = ((u64 *)rqstats)[index];
348 for (i = 0; i < enic->wq_count; i++) {
349 struct enic_wq_stats *wqstats = &enic->wq[i].stats;
352 for (j = 0; j < NUM_ENIC_PER_WQ_STATS; j++) {
353 index = enic_per_wq_stats[j].index;
354 *(data++) = ((u64 *)wqstats)[index];
/* ethtool .get_msglevel: return the driver's message-enable bitmask. */
359 static u32 enic_get_msglevel(struct net_device *netdev)
361 struct enic *enic = netdev_priv(netdev);
362 return enic->msg_enable;
/* ethtool .set_msglevel: store the new message-enable bitmask. */
365 static void enic_set_msglevel(struct net_device *netdev, u32 value)
367 struct enic *enic = netdev_priv(netdev);
368 enic->msg_enable = value;
/* ethtool .get_coalesce: report TX coalescing only in MSI-X mode,
 * plus RX coalescing and the adaptive-RX (AIC) range settings. */
371 static int enic_get_coalesce(struct net_device *netdev,
372 struct ethtool_coalesce *ecmd,
373 struct kernel_ethtool_coalesce *kernel_coal,
374 struct netlink_ext_ack *extack)
376 struct enic *enic = netdev_priv(netdev);
377 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
/* tx_coalesce_usecs is only meaningful with per-queue MSI-X vectors. */
379 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
380 ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
381 ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
382 if (rxcoal->use_adaptive_rx_coalesce)
383 ecmd->use_adaptive_rx_coalesce = 1;
384 ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
385 ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
/* Validate a requested coalesce configuration:
 * - TX coalescing is rejected unless the device is in MSI-X mode;
 * - values above the adapter's max are allowed but reported via
 *   netdev_info (they get clamped by the caller, not rejected);
 * - when adaptive high is set, the clamped [low, high] range must span
 *   at least ENIC_AIC_LARGE_PKT_DIFF.
 * Return statements are not visible in this extraction. */
390 static int enic_coalesce_valid(struct enic *enic,
391 struct ethtool_coalesce *ec)
393 u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
394 u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
395 ec->rx_coalesce_usecs_high);
396 u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
397 ec->rx_coalesce_usecs_low);
399 if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
400 ec->tx_coalesce_usecs)
403 if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
404 (ec->rx_coalesce_usecs > coalesce_usecs_max) ||
405 (ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
406 (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
407 netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
410 if (ec->rx_coalesce_usecs_high &&
411 (rx_coalesce_usecs_high <
412 rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
/* ethtool .set_coalesce: after validation, clamp all requested values
 * to the adapter max, program the TX timers on each WQ MSI-X vector
 * (MSI-X mode only), and update the RX coalescing / adaptive-RX (AIC)
 * range state.  Fixed RX timers are only programmed when adaptive RX
 * is off.  Error-return lines are missing from this extraction. */
418 static int enic_set_coalesce(struct net_device *netdev,
419 struct ethtool_coalesce *ecmd,
420 struct kernel_ethtool_coalesce *kernel_coal,
421 struct netlink_ext_ack *extack)
423 struct enic *enic = netdev_priv(netdev);
424 u32 tx_coalesce_usecs;
425 u32 rx_coalesce_usecs;
426 u32 rx_coalesce_usecs_low;
427 u32 rx_coalesce_usecs_high;
428 u32 coalesce_usecs_max;
429 unsigned int i, intr;
431 struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
433 ret = enic_coalesce_valid(enic, ecmd);
/* Clamp every user value to the adapter's supported maximum. */
436 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
437 tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
439 rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
442 rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
444 rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
/* Program TX timers per WQ interrupt (MSI-X only). */
447 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
448 for (i = 0; i < enic->wq_count; i++) {
449 intr = enic_msix_wq_intr(enic, i);
450 vnic_intr_coalescing_timer_set(&enic->intr[intr],
453 enic->tx_coalesce_usecs = tx_coalesce_usecs;
455 rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
456 if (!rxcoal->use_adaptive_rx_coalesce)
457 enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
/* Update the adaptive-RX range only when a high bound was given. */
458 if (ecmd->rx_coalesce_usecs_high) {
459 rxcoal->range_end = rx_coalesce_usecs_high;
460 rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
461 rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
462 ENIC_AIC_LARGE_PKT_DIFF;
465 enic->rx_coalesce_usecs = rx_coalesce_usecs;
/* ETHTOOL_GRXCLSRLALL helper: walk every hash bucket of the RFS filter
 * table and copy each filter id into @rule_locs, stopping once
 * cmd->rule_cnt entries have been written.  cmd->data reports the
 * number of installed filters.  (Loop bodies after the break, and the
 * return, are not visible in this extraction.) */
470 static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
473 int j, ret = 0, cnt = 0;
475 cmd->data = enic->rfs_h.max - enic->rfs_h.free;
476 for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
477 struct hlist_head *hhead;
478 struct hlist_node *tmp;
479 struct enic_rfs_fltr_node *n;
481 hhead = &enic->rfs_h.ht_head[j];
482 hlist_for_each_entry_safe(n, tmp, hhead, node) {
483 if (cnt == cmd->rule_cnt)
485 rule_locs[cnt] = n->fltr_id;
/* ETHTOOL_GRXCLSRULE helper: look up the RFS filter at fsp->location
 * and translate it into an ethtool_rx_flow_spec.  Filters are exact
 * 5-tuple IPv4 TCP/UDP matches, hence the all-ones masks.  (Error
 * paths, switch braces and return are not visible here.) */
494 static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
496 struct ethtool_rx_flow_spec *fsp =
497 (struct ethtool_rx_flow_spec *)&cmd->fs;
498 struct enic_rfs_fltr_node *n;
500 n = htbl_fltr_search(enic, (u16)fsp->location);
503 switch (n->keys.basic.ip_proto) {
505 fsp->flow_type = TCP_V4_FLOW;
508 fsp->flow_type = UDP_V4_FLOW;
/* Exact-match filter: full masks on addresses and ports. */
514 fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
515 fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
517 fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
518 fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
520 fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
521 fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
523 fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
524 fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
/* Steer matching traffic to the RQ the filter was installed on. */
526 fsp->ring_cookie = n->rq_id;
/* ETHTOOL_GRXFH helper: report which header fields feed the RSS hash
 * per flow type.  IP src/dst always contribute; L4 ports contribute
 * for TCP, and for UDP only when the device advertises UDP RSS
 * capability.  Case labels and braces are missing from this extraction
 * -- the flow-type each visible branch belongs to cannot be fully
 * confirmed here. */
531 static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
533 u8 rss_hash_type = 0;
/* devcmd access is serialized by devcmd_lock. */
536 spin_lock_bh(&enic->devcmd_lock);
537 (void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
538 spin_unlock_bh(&enic->devcmd_lock);
539 switch (cmd->flow_type) {
542 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
543 RXH_IP_SRC | RXH_IP_DST;
546 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
547 if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
548 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
551 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
552 if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
553 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
565 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
/* ethtool .get_rxnfc dispatcher: ring count, RFS filter count/list,
 * individual filter lookup, and RSS hash-field query.  RFS table
 * accesses are serialized under rfs_h.lock.  break statements and the
 * default case are not visible in this extraction. */
574 static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
577 struct enic *enic = netdev_priv(dev);
581 case ETHTOOL_GRXRINGS:
582 cmd->data = enic->rq_count;
584 case ETHTOOL_GRXCLSRLCNT:
585 spin_lock_bh(&enic->rfs_h.lock);
586 cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
587 cmd->data = enic->rfs_h.max;
588 spin_unlock_bh(&enic->rfs_h.lock);
590 case ETHTOOL_GRXCLSRLALL:
591 spin_lock_bh(&enic->rfs_h.lock);
592 ret = enic_grxclsrlall(enic, cmd, rule_locs);
593 spin_unlock_bh(&enic->rfs_h.lock);
595 case ETHTOOL_GRXCLSRULE:
596 spin_lock_bh(&enic->rfs_h.lock);
597 ret = enic_grxclsrule(enic, cmd);
598 spin_unlock_bh(&enic->rfs_h.lock);
/* (presumably the ETHTOOL_GRXFH case -- label not visible) */
601 ret = enic_get_rx_flow_hash(enic, cmd);
/* ethtool .get_tunable: currently only ETHTOOL_RX_COPYBREAK is
 * readable (default case and return not visible here). */
611 static int enic_get_tunable(struct net_device *dev,
612 const struct ethtool_tunable *tuna, void *data)
614 struct enic *enic = netdev_priv(dev);
618 case ETHTOOL_RX_COPYBREAK:
619 *(u32 *)data = enic->rx_copybreak;
/* ethtool .set_tunable: currently only ETHTOOL_RX_COPYBREAK is
 * writable (default case and return not visible here). */
629 static int enic_set_tunable(struct net_device *dev,
630 const struct ethtool_tunable *tuna,
633 struct enic *enic = netdev_priv(dev);
637 case ETHTOOL_RX_COPYBREAK:
638 enic->rx_copybreak = *(u32 *)data;
/* ethtool .get_rxfh_key_size (body not visible in this extraction --
 * presumably returns ENIC_RSS_LEN; confirm against the full file). */
648 static u32 enic_get_rxfh_key_size(struct net_device *netdev)
/* ethtool .get_rxfh: copy out the RSS key and report the Toeplitz
 * ("top") hash function.  The conditional guarding the memcpy (likely
 * a NULL check on rxfh->key) is not visible in this extraction. */
653 static int enic_get_rxfh(struct net_device *netdev,
654 struct ethtool_rxfh_param *rxfh)
656 struct enic *enic = netdev_priv(netdev);
659 memcpy(rxfh->key, enic->rss_key, ENIC_RSS_LEN);
661 rxfh->hfunc = ETH_RSS_HASH_TOP;
/* ethtool .set_rxfh: only the Toeplitz hash function (or no change) is
 * accepted; install the new RSS key and push it to the device via
 * __enic_set_rsskey().  The full rejection condition at 673 is split
 * across lines not visible in this extraction. */
666 static int enic_set_rxfh(struct net_device *netdev,
667 struct ethtool_rxfh_param *rxfh,
668 struct netlink_ext_ack *extack)
670 struct enic *enic = netdev_priv(netdev);
673 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
674 rxfh->hfunc != ETH_RSS_HASH_TOP))
678 memcpy(enic->rss_key, rxfh->key, ENIC_RSS_LEN);
680 return __enic_set_rsskey(enic);
/* ethtool .get_ts_info: only software TX timestamping is supported. */
683 static int enic_get_ts_info(struct net_device *netdev,
684 struct kernel_ethtool_ts_info *info)
686 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
/* ethtool .get_channels: in MSI-X mode report separate RX/TX channel
 * counts and limits; in MSI/INTx mode everything shares one combined
 * channel.  break statements and default case are not visible here. */
691 static void enic_get_channels(struct net_device *netdev,
692 struct ethtool_channels *channels)
694 struct enic *enic = netdev_priv(netdev);
696 switch (vnic_dev_get_intr_mode(enic->vdev)) {
697 case VNIC_DEV_INTR_MODE_MSIX:
698 channels->max_rx = min(enic->rq_avail, ENIC_RQ_MAX);
699 channels->max_tx = min(enic->wq_avail, ENIC_WQ_MAX);
700 channels->rx_count = enic->rq_count;
701 channels->tx_count = enic->wq_count;
703 case VNIC_DEV_INTR_MODE_MSI:
704 case VNIC_DEV_INTR_MODE_INTX:
705 channels->max_combined = 1;
706 channels->combined_count = 1;
/* ethtool operations table for the enic driver.
 * supported_coalesce_params declares which ethtool_coalesce fields the
 * core may pass to enic_set_coalesce(); everything else is rejected by
 * the ethtool core before the driver is called. */
713 static const struct ethtool_ops enic_ethtool_ops = {
714 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
715 ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
716 ETHTOOL_COALESCE_RX_USECS_LOW |
717 ETHTOOL_COALESCE_RX_USECS_HIGH,
718 .get_drvinfo = enic_get_drvinfo,
719 .get_msglevel = enic_get_msglevel,
720 .set_msglevel = enic_set_msglevel,
721 .get_link = ethtool_op_get_link,
722 .get_strings = enic_get_strings,
723 .get_ringparam = enic_get_ringparam,
724 .set_ringparam = enic_set_ringparam,
725 .get_sset_count = enic_get_sset_count,
726 .get_ethtool_stats = enic_get_ethtool_stats,
727 .get_coalesce = enic_get_coalesce,
728 .set_coalesce = enic_set_coalesce,
729 .get_rxnfc = enic_get_rxnfc,
730 .get_tunable = enic_get_tunable,
731 .set_tunable = enic_set_tunable,
732 .get_rxfh_key_size = enic_get_rxfh_key_size,
733 .get_rxfh = enic_get_rxfh,
734 .set_rxfh = enic_set_rxfh,
735 .get_link_ksettings = enic_get_ksettings,
736 .get_ts_info = enic_get_ts_info,
737 .get_channels = enic_get_channels,
/* Hook the ethtool ops table onto a newly created net_device.
 * Called from the driver's probe path (caller not visible here). */
740 void enic_set_ethtool_ops(struct net_device *netdev)
742 netdev->ethtool_ops = &enic_ethtool_ops;