// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2024 Google LLC
 */

#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
#include "gve_utils.h"
static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, gve_driver_name, sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}
static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}
/* For the following stats column string names, make sure the order
 * matches how it is filled in the code. For xdp_aborted, xdp_drop,
 * xdp_pass, xdp_tx, xdp_redirect, make sure it also matches the order
 * as declared in enum xdp_action inside file uapi/linux/bpf.h .
 */
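/* Illustrative sketch, not part of the original file: the alignment with
 * enum xdp_action described above could be enforced at build time. This
 * assumes, as the rest of this file does, that GVE_XDP_ACTIONS counts the
 * tracked actions XDP_ABORTED through XDP_REDIRECT:
 *
 *	static_assert(GVE_XDP_ACTIONS == XDP_REDIRECT + 1,
 *		      "XDP stat strings out of sync with enum xdp_action");
 */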
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"rx_hsplit_unsplit_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
	"rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
	"rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
	"rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
	"tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
	"adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
	"adminq_query_rss_cnt",
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};
#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS	ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS	ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u8 *s = (char *)data;
	int num_tx_queues;
	int i, j;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
			ethtool_puts(&s, gve_gstrings_main_stats[i]);

		for (i = 0; i < priv->rx_cfg.num_queues; i++)
			for (j = 0; j < NUM_GVE_RX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
						i);

		for (i = 0; i < num_tx_queues; i++)
			for (j = 0; j < NUM_GVE_TX_CNTS; j++)
				ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
						i);

		for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
			ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
			ethtool_puts(&s, gve_gstrings_priv_flags[i]);
		break;

	default:
		break;
	}
}
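/* Illustrative note: for ETH_SS_STATS the strings above are emitted as the
 * main counters first, then one fully expanded per-queue block per RX queue
 * ("rx_posted_desc[0]" ... "rx_xdp_alloc_fails[0]", then queue 1, and so
 * on), then the per-TX-queue blocks, then the adminq counters. This is the
 * exact order in which gve_get_ethtool_stats() writes the values.
 */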
static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (num_tx_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
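/* Worked example (illustrative): with the string tables above,
 * GVE_MAIN_STATS_LEN is 18, GVE_ADMINQ_STATS_LEN is 20, NUM_GVE_RX_CNTS is
 * 24 and NUM_GVE_TX_CNTS is 13, so a 4 RX / 4 TX queue configuration
 * reports 18 + 20 + 4 * 24 + 4 * 13 = 186 stats for ETH_SS_STATS.
 */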
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
		tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
		tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
		rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
		tx_dropped;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	int num_stopped_rxqs = 0;
	int num_stopped_txqs = 0;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int num_tx_queues;
	int ring;
	int i, j;

	ASSERT_RTNL();
	priv = netdev_priv(netdev);
	num_tx_queues = gve_num_tx_queues(priv);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
		rx_qid_to_stats_idx[ring] = -1;
		if (!gve_rx_was_added_to_block(priv, ring))
			num_stopped_rxqs++;
	}
	tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
	for (ring = 0; ring < num_tx_queues; ring++) {
		tx_qid_to_stats_idx[ring] = -1;
		if (!gve_tx_was_added_to_block(priv, ring))
			num_stopped_txqs++;
	}
	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
	     ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
				tmp_rx_hsplit_unsplit_pkt =
					rx->rx_hsplit_unsplit_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
	     ring < num_tx_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
			tx_dropped += priv->tx[ring].dropped_pkt;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = rx_hsplit_pkt;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	data[i++] = tx_dropped;
	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = rx_hsplit_unsplit_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

	/* For rx cross-reporting stats, start from nic rx stats in report */
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	/* The boundary between driver stats and NIC stats shifts if there are
	 * stopped queues.
	 */
	base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
			  NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM *
			(priv->rx_cfg.num_queues - num_stopped_rxqs) +
			base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
			net_err_ratelimited("Invalid rxq id in NIC stats\n");
			continue;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			data[i++] = rx->fill_cnt - rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			data[i++] = tmp_rx_hsplit_bytes;
			data[i++] = rx->rx_cont_packet_cnt;
			data[i++] = rx->rx_frag_flip_cnt;
			data[i++] = rx->rx_frag_copy_cnt;
			data[i++] = rx->rx_frag_alloc_cnt;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			stats_idx = rx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);

					data[i++] = value;
				}
			}
			/* XDP rx counters */
			do {
				start = u64_stats_fetch_begin(&priv->rx[ring].statss);
				for (j = 0; j < GVE_XDP_ACTIONS; j++)
					data[i + j] = rx->xdp_actions[j];
				data[i + j++] = rx->xdp_tx_errors;
				data[i + j++] = rx->xdp_redirect_errors;
				data[i + j++] = rx->xdp_alloc_fails;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM *
		(num_tx_queues - num_stopped_txqs) +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		if (queue_id < 0 || queue_id >= num_tx_queues) {
			net_err_ratelimited("Invalid txq id in NIC stats\n");
			continue;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < num_tx_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
				data[i++] = tx->req - tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts;
				 */
				data[i++] = 0;
				data[i++] = 0;
				data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = gve_tx_load_event_counter(priv, tx);
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			stats_idx = tx_qid_to_stats_idx[ring];
			if (skip_nic_stats || stats_idx < 0) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
			} else {
				for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
					u64 value =
						be64_to_cpu(report_stats[stats_idx + j].value);
					data[i++] = value;
				}
			}
			/* XDP xsk counters */
			data[i++] = tx->xdp_xsk_wakeup;
			data[i++] = tx->xdp_xsk_done;
			do {
				start = u64_stats_fetch_begin(&priv->tx[ring].statss);
				data[i] = tx->xdp_xsk_sent;
				data[i + 1] = tx->xdp_xmit;
				data[i + 2] = tx->xdp_xmit_errors;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			i += 3; /* XDP tx counters */
		}
	} else {
		i += num_tx_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
	data[i++] = priv->adminq_get_ptype_map_cnt;
	data[i++] = priv->adminq_query_flow_rules_cnt;
	data[i++] = priv->adminq_cfg_flow_rule_cnt;
	data[i++] = priv->adminq_cfg_rss_cnt;
	data[i++] = priv->adminq_query_rss_cnt;
}
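/* A minimal sketch of the u64_stats retry pattern that
 * gve_get_ethtool_stats() open-codes above: keep re-reading a per-ring
 * counter until no writer raced the read section. The helper below is
 * illustrative only; it is not part of the driver and nothing calls it.
 */
static u64 __maybe_unused gve_read_rx_bytes_sketch(struct gve_rx_ring *rx)
{
	unsigned int start;
	u64 bytes;

	do {
		/* snapshot the writer sequence count */
		start = u64_stats_fetch_begin(&rx->statss);
		bytes = rx->rbytes;
		/* retry if a writer updated the ring stats meanwhile */
	} while (u64_stats_fetch_retry(&rx->statss, start));

	return bytes;
}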
static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}
static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (priv->num_xdp_queues &&
	    (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
		dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
		return -EINVAL;
	}

	if (!netif_running(netdev)) {
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}
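/* Illustrative note: with XDP loaded, each RX queue needs a dedicated XDP
 * TX queue on top of the regular TX queues, hence the 2 * new_tx check
 * above. For example, a device limited to 16 TX queues can run at most
 * 8 RX/TX pairs (2 * 8 <= 16) while an XDP program is attached.
 */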
static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd,
			      struct kernel_ethtool_ringparam *kernel_cmd,
			      struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->max_rx_desc_cnt;
	cmd->tx_max_pending = priv->max_tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;

	if (!gve_header_split_supported(priv))
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
	else if (priv->header_split_enabled)
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	else
		kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static int gve_adjust_ring_sizes(struct gve_priv *priv,
				 u16 new_tx_desc_cnt,
				 u16 new_rx_desc_cnt)
{
	struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
	struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
	int err;

	/* get current queue configuration */
	gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);

	/* copy over the new ring_size from ethtool */
	tx_alloc_cfg.ring_size = new_tx_desc_cnt;
	rx_alloc_cfg.ring_size = new_rx_desc_cnt;

	if (netif_running(priv->dev)) {
		err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
		if (err)
			return err;
	}

	/* Set new ring_size for the next up */
	priv->tx_desc_cnt = new_tx_desc_cnt;
	priv->rx_desc_cnt = new_rx_desc_cnt;

	return 0;
}
static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
				      u16 new_rx_desc_cnt)
{
	/* check for valid range */
	if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
	    new_tx_desc_cnt > priv->max_tx_desc_cnt ||
	    new_rx_desc_cnt < priv->min_rx_desc_cnt ||
	    new_rx_desc_cnt > priv->max_rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
		return -EINVAL;
	}

	if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
		dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
		return -EINVAL;
	}
	return 0;
}
static int gve_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *cmd,
			     struct kernel_ethtool_ringparam *kernel_cmd,
			     struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u16 new_tx_cnt, new_rx_cnt;
	int err;

	err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
	if (err)
		return err;

	if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
		return 0;

	if (!priv->modify_ring_size_enabled) {
		dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
		return -EOPNOTSUPP;
	}

	new_tx_cnt = cmd->tx_pending;
	new_rx_cnt = cmd->rx_pending;

	if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
		return -EINVAL;

	return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = gve_is_gqi(priv) ?
			GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -ERANGE;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}
static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;
	int num_tx_queues;

	num_tx_queues = gve_num_tx_queues(priv);
	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on. */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
					msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero off gve stats when report-stats turned off and
	 * delete report stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			num_tx_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
		       sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}
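/* Illustrative note: this flag is driven from user space with
 * "ethtool --set-priv-flags <ifname> report-stats on|off". Turning it on
 * arms the timer that periodically asks the device to refresh
 * priv->stats_report, which gve_get_ethtool_stats() cross-reports above.
 */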
static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (priv->link_speed == 0)
		err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;

	cmd->base.duplex = DUPLEX_FULL;

	return err;
}

static int gve_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;
	ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;

	return 0;
}
static int gve_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_ec,
			    struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 tx_usecs_orig = priv->tx_coalesce_usecs;
	u32 rx_usecs_orig = priv->rx_coalesce_usecs;
	int idx;

	if (gve_is_gqi(priv))
		return -EOPNOTSUPP;

	if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
	    ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
		return -EINVAL;
	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;

	if (tx_usecs_orig != priv->tx_coalesce_usecs) {
		for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->tx_coalesce_usecs);
		}
	}

	if (rx_usecs_orig != priv->rx_coalesce_usecs) {
		for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
			int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
			struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

			gve_set_itr_coalesce_usecs_dqo(priv, block,
						       priv->rx_coalesce_usecs);
		}
	}

	return 0;
}
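/* Illustrative note: interrupt coalescing is only tunable on the DQO
 * descriptor format (GQI returns -EOPNOTSUPP above), e.g. via
 * "ethtool -C <ifname> rx-usecs 20 tx-usecs 20", with both values capped
 * at GVE_MAX_ITR_INTERVAL_DQO.
 */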
static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	if (!(netdev->features & NETIF_F_NTUPLE))
		return -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = gve_add_flow_rule(priv, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = gve_del_flow_rule(priv, cmd);
		break;
	case ETHTOOL_SRXFH:
		err = -EOPNOTSUPP;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_cfg.num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (!priv->max_flow_rules)
			return -EOPNOTSUPP;

		err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
		if (err)
			return err;

		cmd->rule_cnt = priv->num_flow_rules;
		cmd->data = priv->max_flow_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = gve_get_flow_rule_entry(priv, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static u32 gve_get_rxfh_key_size(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->rss_key_size;
}

static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->rss_lut_size;
}
static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (!priv->rss_key_size || !priv->rss_lut_size)
		return -EOPNOTSUPP;

	return gve_adminq_query_rss_config(priv, rxfh);
}

static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
			struct netlink_ext_ack *extack)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (!priv->rss_key_size || !priv->rss_lut_size)
		return -EOPNOTSUPP;

	return gve_adminq_configure_rss(priv, rxfh);
}
const struct ethtool_ops gve_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.set_rxnfc = gve_set_rxnfc,
	.get_rxnfc = gve_get_rxnfc,
	.get_rxfh_indir_size = gve_get_rxfh_indir_size,
	.get_rxfh_key_size = gve_get_rxfh_key_size,
	.get_rxfh = gve_get_rxfh,
	.set_rxfh = gve_set_rxfh,
	.get_link = ethtool_op_get_link,
	.get_coalesce = gve_get_coalesce,
	.set_coalesce = gve_set_coalesce,
	.get_ringparam = gve_get_ringparam,
	.set_ringparam = gve_set_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings,
	.get_ts_info = ethtool_op_get_ts_info,
};