drivers/net/ethernet/google/gve/gve_ethtool.c
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2024 Google LLC
5  */
6
7 #include <linux/rtnetlink.h>
8 #include "gve.h"
9 #include "gve_adminq.h"
10 #include "gve_dqo.h"
11 #include "gve_utils.h"
12
13 static void gve_get_drvinfo(struct net_device *netdev,
14                             struct ethtool_drvinfo *info)
15 {
16         struct gve_priv *priv = netdev_priv(netdev);
17
18         strscpy(info->driver, gve_driver_name, sizeof(info->driver));
19         strscpy(info->version, gve_version_str, sizeof(info->version));
20         strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
21 }
22
23 static void gve_set_msglevel(struct net_device *netdev, u32 value)
24 {
25         struct gve_priv *priv = netdev_priv(netdev);
26
27         priv->msg_enable = value;
28 }
29
30 static u32 gve_get_msglevel(struct net_device *netdev)
31 {
32         struct gve_priv *priv = netdev_priv(netdev);
33
34         return priv->msg_enable;
35 }
36
37 /* For the following stats column string names, make sure the order
38  * matches how they are filled in by the code. For xdp_aborted, xdp_drop,
39  * xdp_pass, xdp_tx and xdp_redirect, the order must also match the order
40  * of the corresponding actions in enum xdp_action in uapi/linux/bpf.h.
41  */
42 static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
43         "rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
44         "tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
45         "rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
46         "rx_hsplit_unsplit_pkt",
47         "interface_up_cnt", "interface_down_cnt", "reset_cnt",
48         "page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
49 };
50
51 static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
52         "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
53         "rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
54         "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
55         "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
56         "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
57         "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
58         "rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
59         "rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
60         "rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
61 };
62
63 static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
64         "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
65         "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
66         "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
67         "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
68 };
69
70 static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
71         "adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
72         "adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
73         "adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
74         "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
75         "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
76         "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
77         "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
78         "adminq_query_flow_rules", "adminq_cfg_flow_rule", "adminq_cfg_rss_cnt",
79         "adminq_query_rss_cnt",
80 };
81
82 static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
83         "report-stats",
84 };
85
86 #define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
87 #define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
88 #define NUM_GVE_TX_CNTS ARRAY_SIZE(gve_gstrings_tx_stats)
89 #define NUM_GVE_RX_CNTS ARRAY_SIZE(gve_gstrings_rx_stats)
90 #define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)
91
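/* Emit the stat name strings for ethtool.
 * ETH_SS_STATS: main stats, then per-RX-queue stats, then per-TX-queue
 * stats, then adminq stats. Per-queue names are expanded from the "[%u]"
 * templates above, e.g. "rx_posted_desc[0]" for RX queue 0.
 * ETH_SS_PRIV_FLAGS: the private flag names.
 */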
92 static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
93 {
94         struct gve_priv *priv = netdev_priv(netdev);
95         u8 *s = data;
96         int num_tx_queues;
97         int i, j;
98
99         num_tx_queues = gve_num_tx_queues(priv);
100         switch (stringset) {
101         case ETH_SS_STATS:
102                 for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
103                         ethtool_puts(&s, gve_gstrings_main_stats[i]);
104
105                 for (i = 0; i < priv->rx_cfg.num_queues; i++)
106                         for (j = 0; j < NUM_GVE_RX_CNTS; j++)
107                                 ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
108                                                 i);
109
110                 for (i = 0; i < num_tx_queues; i++)
111                         for (j = 0; j < NUM_GVE_TX_CNTS; j++)
112                                 ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
113                                                 i);
114
115                 for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
116                         ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
117
118                 break;
119
120         case ETH_SS_PRIV_FLAGS:
121                 for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
122                         ethtool_puts(&s, gve_gstrings_priv_flags[i]);
123                 break;
124
125         default:
126                 break;
127         }
128 }
129
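/* Return the number of strings/values emitted for each string set. This
 * must match exactly what gve_get_strings() and gve_get_ethtool_stats()
 * produce, including the per-queue entries.
 */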
130 static int gve_get_sset_count(struct net_device *netdev, int sset)
131 {
132         struct gve_priv *priv = netdev_priv(netdev);
133         int num_tx_queues;
134
135         num_tx_queues = gve_num_tx_queues(priv);
136         switch (sset) {
137         case ETH_SS_STATS:
138                 return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
139                        (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
140                        (num_tx_queues * NUM_GVE_TX_CNTS);
141         case ETH_SS_PRIV_FLAGS:
142                 return GVE_PRIV_FLAGS_STR_LEN;
143         default:
144                 return -EOPNOTSUPP;
145         }
146 }
147
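/* Fill the ETH_SS_STATS values in the same order as gve_get_strings():
 * aggregated main stats, per-RX-ring stats, per-TX-ring stats, then adminq
 * counters. Per-queue NIC stats are read from the stats report shared with
 * the device (priv->stats_report); stopped queues are not reported there,
 * which shifts the offsets, hence the queue-id-to-stats-index maps built
 * below.
 */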
148 static void
149 gve_get_ethtool_stats(struct net_device *netdev,
150                       struct ethtool_stats *stats, u64 *data)
151 {
152         u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
153                 tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
154                 tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
155                 tmp_tx_pkts, tmp_tx_bytes;
156         u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
157                 rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
158                 tx_dropped;
159         int stats_idx, base_stats_idx, max_stats_idx;
160         struct stats *report_stats;
161         int *rx_qid_to_stats_idx;
162         int *tx_qid_to_stats_idx;
163         int num_stopped_rxqs = 0;
164         int num_stopped_txqs = 0;
165         struct gve_priv *priv;
166         bool skip_nic_stats;
167         unsigned int start;
168         int num_tx_queues;
169         int ring;
170         int i, j;
171
172         ASSERT_RTNL();
173
174         priv = netdev_priv(netdev);
175         num_tx_queues = gve_num_tx_queues(priv);
176         report_stats = priv->stats_report->stats;
177         rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
178                                             sizeof(int), GFP_KERNEL);
179         if (!rx_qid_to_stats_idx)
180                 return;
181         for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
182                 rx_qid_to_stats_idx[ring] = -1;
183                 if (!gve_rx_was_added_to_block(priv, ring))
184                         num_stopped_rxqs++;
185         }
186         tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
187                                             sizeof(int), GFP_KERNEL);
188         if (!tx_qid_to_stats_idx) {
189                 kfree(rx_qid_to_stats_idx);
190                 return;
191         }
192         for (ring = 0; ring < num_tx_queues; ring++) {
193                 tx_qid_to_stats_idx[ring] = -1;
194                 if (!gve_tx_was_added_to_block(priv, ring))
195                         num_stopped_txqs++;
196         }
197
198         for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
199              rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
200              rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
201              ring = 0;
202              ring < priv->rx_cfg.num_queues; ring++) {
203                 if (priv->rx) {
204                         do {
205                                 struct gve_rx_ring *rx = &priv->rx[ring];
206
207                                 start =
208                                   u64_stats_fetch_begin(&priv->rx[ring].statss);
209                                 tmp_rx_pkts = rx->rpackets;
210                                 tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
211                                 tmp_rx_bytes = rx->rbytes;
212                                 tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
213                                 tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
214                                 tmp_rx_desc_err_dropped_pkt =
215                                         rx->rx_desc_err_dropped_pkt;
216                                 tmp_rx_hsplit_unsplit_pkt =
217                                         rx->rx_hsplit_unsplit_pkt;
218                         } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
219                                                        start));
220                         rx_pkts += tmp_rx_pkts;
221                         rx_hsplit_pkt += tmp_rx_hsplit_pkt;
222                         rx_bytes += tmp_rx_bytes;
223                         rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
224                         rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
225                         rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
226                         rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
227                 }
228         }
229         for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
230              ring < num_tx_queues; ring++) {
231                 if (priv->tx) {
232                         do {
233                                 start =
234                                   u64_stats_fetch_begin(&priv->tx[ring].statss);
235                                 tmp_tx_pkts = priv->tx[ring].pkt_done;
236                                 tmp_tx_bytes = priv->tx[ring].bytes_done;
237                         } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
238                                                        start));
239                         tx_pkts += tmp_tx_pkts;
240                         tx_bytes += tmp_tx_bytes;
241                         tx_dropped += priv->tx[ring].dropped_pkt;
242                 }
243         }
244
245         i = 0;
246         data[i++] = rx_pkts;
247         data[i++] = rx_hsplit_pkt;
248         data[i++] = tx_pkts;
249         data[i++] = rx_bytes;
250         data[i++] = tx_bytes;
251         /* total rx dropped packets */
252         data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
253                     rx_desc_err_dropped_pkt;
254         data[i++] = tx_dropped;
255         data[i++] = priv->tx_timeo_cnt;
256         data[i++] = rx_skb_alloc_fail;
257         data[i++] = rx_buf_alloc_fail;
258         data[i++] = rx_desc_err_dropped_pkt;
259         data[i++] = rx_hsplit_unsplit_pkt;
260         data[i++] = priv->interface_up_cnt;
261         data[i++] = priv->interface_down_cnt;
262         data[i++] = priv->reset_cnt;
263         data[i++] = priv->page_alloc_fail;
264         data[i++] = priv->dma_mapping_error;
265         data[i++] = priv->stats_report_trigger_cnt;
266         i = GVE_MAIN_STATS_LEN;
267
268         /* For rx cross-reporting stats, start from nic rx stats in report */
269         base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
270                 GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
271         /* The boundary between driver stats and NIC stats shifts if there are
272          * stopped queues.
273          */
274         base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
275                 NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
276         max_stats_idx = NIC_RX_STATS_REPORT_NUM *
277                 (priv->rx_cfg.num_queues - num_stopped_rxqs) +
278                 base_stats_idx;
279         /* Preprocess the stats report for rx, map queue id to start index */
280         skip_nic_stats = false;
281         for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
282                 stats_idx += NIC_RX_STATS_REPORT_NUM) {
283                 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
284                 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
285
286                 if (stat_name == 0) {
287                         /* no stats written by NIC yet */
288                         skip_nic_stats = true;
289                         break;
290                 }
291                 if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
292                         net_err_ratelimited("Invalid rxq id in NIC stats\n");
293                         continue;
294                 }
295                 rx_qid_to_stats_idx[queue_id] = stats_idx;
296         }
297         /* walk RX rings */
298         if (priv->rx) {
299                 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
300                         struct gve_rx_ring *rx = &priv->rx[ring];
301
302                         data[i++] = rx->fill_cnt;
303                         data[i++] = rx->cnt;
304                         data[i++] = rx->fill_cnt - rx->cnt;
305                         do {
306                                 start =
307                                   u64_stats_fetch_begin(&priv->rx[ring].statss);
308                                 tmp_rx_bytes = rx->rbytes;
309                                 tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
310                                 tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
311                                 tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
312                                 tmp_rx_desc_err_dropped_pkt =
313                                         rx->rx_desc_err_dropped_pkt;
314                         } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
315                                                        start));
316                         data[i++] = tmp_rx_bytes;
317                         data[i++] = tmp_rx_hsplit_bytes;
318                         data[i++] = rx->rx_cont_packet_cnt;
319                         data[i++] = rx->rx_frag_flip_cnt;
320                         data[i++] = rx->rx_frag_copy_cnt;
321                         data[i++] = rx->rx_frag_alloc_cnt;
322                         /* rx dropped packets */
323                         data[i++] = tmp_rx_skb_alloc_fail +
324                                 tmp_rx_buf_alloc_fail +
325                                 tmp_rx_desc_err_dropped_pkt;
326                         data[i++] = rx->rx_copybreak_pkt;
327                         data[i++] = rx->rx_copied_pkt;
328                         /* stats from NIC */
329                         stats_idx = rx_qid_to_stats_idx[ring];
330                         if (skip_nic_stats || stats_idx < 0) {
331                                 /* skip NIC rx stats */
332                                 i += NIC_RX_STATS_REPORT_NUM;
333                         } else {
334                                 for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
335                                         u64 value =
336                                                 be64_to_cpu(report_stats[stats_idx + j].value);
337
338                                         data[i++] = value;
339                                 }
340                         }
341                         /* XDP rx counters */
342                         do {
343                                 start = u64_stats_fetch_begin(&priv->rx[ring].statss);
344                                 for (j = 0; j < GVE_XDP_ACTIONS; j++)
345                                         data[i + j] = rx->xdp_actions[j];
346                                 data[i + j++] = rx->xdp_tx_errors;
347                                 data[i + j++] = rx->xdp_redirect_errors;
348                                 data[i + j++] = rx->xdp_alloc_fails;
349                         } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
350                                                        start));
351                         i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
352                 }
353         } else {
354                 i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
355         }
356
357         /* For tx cross-reporting stats, start from nic tx stats in report */
358         base_stats_idx = max_stats_idx;
359         max_stats_idx = NIC_TX_STATS_REPORT_NUM *
360                 (num_tx_queues - num_stopped_txqs) +
361                 max_stats_idx;
362         /* Preprocess the stats report for tx, map queue id to start index */
363         skip_nic_stats = false;
364         for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
365                 stats_idx += NIC_TX_STATS_REPORT_NUM) {
366                 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
367                 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
368
369                 if (stat_name == 0) {
370                         /* no stats written by NIC yet */
371                         skip_nic_stats = true;
372                         break;
373                 }
374                 if (queue_id < 0 || queue_id >= num_tx_queues) {
375                         net_err_ratelimited("Invalid txq id in NIC stats\n");
376                         continue;
377                 }
378                 tx_qid_to_stats_idx[queue_id] = stats_idx;
379         }
380         /* walk TX rings */
381         if (priv->tx) {
382                 for (ring = 0; ring < num_tx_queues; ring++) {
383                         struct gve_tx_ring *tx = &priv->tx[ring];
384
385                         if (gve_is_gqi(priv)) {
386                                 data[i++] = tx->req;
387                                 data[i++] = tx->done;
388                                 data[i++] = tx->req - tx->done;
389                         } else {
390                                 /* DQO doesn't currently support
391                                  * posted/completed descriptor counts.
392                                  */
393                                 data[i++] = 0;
394                                 data[i++] = 0;
395                                 data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
396                         }
397                         do {
398                                 start =
399                                   u64_stats_fetch_begin(&priv->tx[ring].statss);
400                                 tmp_tx_bytes = tx->bytes_done;
401                         } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
402                                                        start));
403                         data[i++] = tmp_tx_bytes;
404                         data[i++] = tx->wake_queue;
405                         data[i++] = tx->stop_queue;
406                         data[i++] = gve_tx_load_event_counter(priv, tx);
407                         data[i++] = tx->dma_mapping_error;
408                         /* stats from NIC */
409                         stats_idx = tx_qid_to_stats_idx[ring];
410                         if (skip_nic_stats || stats_idx < 0) {
411                                 /* skip NIC tx stats */
412                                 i += NIC_TX_STATS_REPORT_NUM;
413                         } else {
414                                 for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
415                                         u64 value =
416                                                 be64_to_cpu(report_stats[stats_idx + j].value);
417                                         data[i++] = value;
418                                 }
419                         }
420                         /* XDP xsk counters */
421                         data[i++] = tx->xdp_xsk_wakeup;
422                         data[i++] = tx->xdp_xsk_done;
423                         do {
424                                 start = u64_stats_fetch_begin(&priv->tx[ring].statss);
425                                 data[i] = tx->xdp_xsk_sent;
426                                 data[i + 1] = tx->xdp_xmit;
427                                 data[i + 2] = tx->xdp_xmit_errors;
428                         } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
429                                                        start));
430                         i += 3; /* XDP tx counters */
431                 }
432         } else {
433                 i += num_tx_queues * NUM_GVE_TX_CNTS;
434         }
435
436         kfree(rx_qid_to_stats_idx);
437         kfree(tx_qid_to_stats_idx);
438         /* AQ Stats */
439         data[i++] = priv->adminq_prod_cnt;
440         data[i++] = priv->adminq_cmd_fail;
441         data[i++] = priv->adminq_timeouts;
442         data[i++] = priv->adminq_describe_device_cnt;
443         data[i++] = priv->adminq_cfg_device_resources_cnt;
444         data[i++] = priv->adminq_register_page_list_cnt;
445         data[i++] = priv->adminq_unregister_page_list_cnt;
446         data[i++] = priv->adminq_create_tx_queue_cnt;
447         data[i++] = priv->adminq_create_rx_queue_cnt;
448         data[i++] = priv->adminq_destroy_tx_queue_cnt;
449         data[i++] = priv->adminq_destroy_rx_queue_cnt;
450         data[i++] = priv->adminq_dcfg_device_resources_cnt;
451         data[i++] = priv->adminq_set_driver_parameter_cnt;
452         data[i++] = priv->adminq_report_stats_cnt;
453         data[i++] = priv->adminq_report_link_speed_cnt;
454         data[i++] = priv->adminq_get_ptype_map_cnt;
455         data[i++] = priv->adminq_query_flow_rules_cnt;
456         data[i++] = priv->adminq_cfg_flow_rule_cnt;
457         data[i++] = priv->adminq_cfg_rss_cnt;
458         data[i++] = priv->adminq_query_rss_cnt;
459 }
460
461 static void gve_get_channels(struct net_device *netdev,
462                              struct ethtool_channels *cmd)
463 {
464         struct gve_priv *priv = netdev_priv(netdev);
465
466         cmd->max_rx = priv->rx_cfg.max_queues;
467         cmd->max_tx = priv->tx_cfg.max_queues;
468         cmd->max_other = 0;
469         cmd->max_combined = 0;
470         cmd->rx_count = priv->rx_cfg.num_queues;
471         cmd->tx_count = priv->tx_cfg.num_queues;
472         cmd->other_count = 0;
473         cmd->combined_count = 0;
474 }
475
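/* Change the RX/TX queue counts. The combined count cannot be changed and
 * both RX and TX must stay non-zero. With XDP enabled, the RX and TX counts
 * must match and TX may use at most half of the maximum TX queues, since
 * XDP doubles the TX queue usage. If the interface is down only the stored
 * configuration is updated; otherwise the queues are adjusted live.
 */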
476 static int gve_set_channels(struct net_device *netdev,
477                             struct ethtool_channels *cmd)
478 {
479         struct gve_priv *priv = netdev_priv(netdev);
480         struct gve_queue_config new_tx_cfg = priv->tx_cfg;
481         struct gve_queue_config new_rx_cfg = priv->rx_cfg;
482         struct ethtool_channels old_settings;
483         int new_tx = cmd->tx_count;
484         int new_rx = cmd->rx_count;
485
486         gve_get_channels(netdev, &old_settings);
487
488         /* Changing combined is not allowed */
489         if (cmd->combined_count != old_settings.combined_count)
490                 return -EINVAL;
491
492         if (!new_rx || !new_tx)
493                 return -EINVAL;
494
495         if (priv->num_xdp_queues &&
496             (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
497                 dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
498                 return -EINVAL;
499         }
500
501         if (!netif_running(netdev)) {
502                 priv->tx_cfg.num_queues = new_tx;
503                 priv->rx_cfg.num_queues = new_rx;
504                 return 0;
505         }
506
507         new_tx_cfg.num_queues = new_tx;
508         new_rx_cfg.num_queues = new_rx;
509
510         return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
511 }
512
513 static void gve_get_ringparam(struct net_device *netdev,
514                               struct ethtool_ringparam *cmd,
515                               struct kernel_ethtool_ringparam *kernel_cmd,
516                               struct netlink_ext_ack *extack)
517 {
518         struct gve_priv *priv = netdev_priv(netdev);
519
520         cmd->rx_max_pending = priv->max_rx_desc_cnt;
521         cmd->tx_max_pending = priv->max_tx_desc_cnt;
522         cmd->rx_pending = priv->rx_desc_cnt;
523         cmd->tx_pending = priv->tx_desc_cnt;
524
525         if (!gve_header_split_supported(priv))
526                 kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
527         else if (priv->header_split_enabled)
528                 kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
529         else
530                 kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
531 }
532
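/* Apply new TX/RX descriptor counts. If the interface is running, the
 * rings are reallocated with the new sizes via gve_adjust_config(); the
 * counts are then saved so the next interface up also uses them.
 */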
533 static int gve_adjust_ring_sizes(struct gve_priv *priv,
534                                  u16 new_tx_desc_cnt,
535                                  u16 new_rx_desc_cnt)
536 {
537         struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
538         struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
539         int err;
540
541         /* get current queue configuration */
542         gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
543
544         /* copy over the new ring_size from ethtool */
545         tx_alloc_cfg.ring_size = new_tx_desc_cnt;
546         rx_alloc_cfg.ring_size = new_rx_desc_cnt;
547
548         if (netif_running(priv->dev)) {
549                 err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
550                 if (err)
551                         return err;
552         }
553
554         /* Set new ring_size for the next interface up */
555         priv->tx_desc_cnt = new_tx_desc_cnt;
556         priv->rx_desc_cnt = new_rx_desc_cnt;
557
558         return 0;
559 }
560
561 static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
562                                       u16 new_rx_desc_cnt)
563 {
564         /* check for valid range */
565         if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
566             new_tx_desc_cnt > priv->max_tx_desc_cnt ||
567             new_rx_desc_cnt < priv->min_rx_desc_cnt ||
568             new_rx_desc_cnt > priv->max_rx_desc_cnt) {
569                 dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
570                 return -EINVAL;
571         }
572
573         if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
574                 dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
575                 return -EINVAL;
576         }
577         return 0;
578 }
579
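/* ethtool -G handler: apply the tcp-data-split (header split) setting
 * first, then validate and apply new ring sizes. Resizing requires device
 * support and power-of-2 counts within the advertised min/max range.
 */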
580 static int gve_set_ringparam(struct net_device *netdev,
581                              struct ethtool_ringparam *cmd,
582                              struct kernel_ethtool_ringparam *kernel_cmd,
583                              struct netlink_ext_ack *extack)
584 {
585         struct gve_priv *priv = netdev_priv(netdev);
586         u16 new_tx_cnt, new_rx_cnt;
587         int err;
588
589         err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
590         if (err)
591                 return err;
592
593         if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
594                 return 0;
595
596         if (!priv->modify_ring_size_enabled) {
597                 dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
598                 return -EOPNOTSUPP;
599         }
600
601         new_tx_cnt = cmd->tx_pending;
602         new_rx_cnt = cmd->rx_pending;
603
604         if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
605                 return -EINVAL;
606
607         return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
608 }
609
610 static int gve_user_reset(struct net_device *netdev, u32 *flags)
611 {
612         struct gve_priv *priv = netdev_priv(netdev);
613
614         if (*flags == ETH_RESET_ALL) {
615                 *flags = 0;
616                 return gve_reset(priv, true);
617         }
618
619         return -EOPNOTSUPP;
620 }
621
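/* Only the rx-copybreak tunable is supported. The setter below bounds the
 * value by the RX buffer size (the GQI default buffer size, or the DQO
 * data buffer size).
 */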
622 static int gve_get_tunable(struct net_device *netdev,
623                            const struct ethtool_tunable *etuna, void *value)
624 {
625         struct gve_priv *priv = netdev_priv(netdev);
626
627         switch (etuna->id) {
628         case ETHTOOL_RX_COPYBREAK:
629                 *(u32 *)value = priv->rx_copybreak;
630                 return 0;
631         default:
632                 return -EOPNOTSUPP;
633         }
634 }
635
636 static int gve_set_tunable(struct net_device *netdev,
637                            const struct ethtool_tunable *etuna,
638                            const void *value)
639 {
640         struct gve_priv *priv = netdev_priv(netdev);
641         u32 len;
642
643         switch (etuna->id) {
644         case ETHTOOL_RX_COPYBREAK:
645         {
646                 u32 max_copybreak = gve_is_gqi(priv) ?
647                         GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
648
649                 len = *(u32 *)value;
650                 if (len > max_copybreak)
651                         return -EINVAL;
652                 priv->rx_copybreak = len;
653                 return 0;
654         }
655         default:
656                 return -EOPNOTSUPP;
657         }
658 }
659
660 static u32 gve_get_priv_flags(struct net_device *netdev)
661 {
662         struct gve_priv *priv = netdev_priv(netdev);
663         u32 ret_flags = 0;
664
665         /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
666         if (priv->ethtool_flags & BIT(0))
667                 ret_flags |= BIT(0);
668         return ret_flags;
669 }
670
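/* The only private flag is report-stats (bit 0). Enabling it starts the
 * stats report timer; disabling it zeroes the per-queue entries in the
 * shared stats report and deletes the timer.
 */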
671 static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
672 {
673         struct gve_priv *priv = netdev_priv(netdev);
674         u64 ori_flags, new_flags;
675         int num_tx_queues;
676
677         num_tx_queues = gve_num_tx_queues(priv);
678         ori_flags = READ_ONCE(priv->ethtool_flags);
679         new_flags = ori_flags;
680
681         /* Only one priv flag exists: report-stats (BIT(0)) */
682         if (flags & BIT(0))
683                 new_flags |= BIT(0);
684         else
685                 new_flags &= ~(BIT(0));
686         priv->ethtool_flags = new_flags;
687         /* start report-stats timer when user turns report stats on. */
688         if (flags & BIT(0)) {
689                 mod_timer(&priv->stats_report_timer,
690                           round_jiffies(jiffies +
691                                         msecs_to_jiffies(priv->stats_report_timer_period)));
692         }
693         /* Zero out gve stats when report-stats is turned off and */
694         /* delete the report-stats timer. */
695         if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
696                 int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
697                         num_tx_queues;
698                 int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
699                         priv->rx_cfg.num_queues;
700
701                 memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
702                                    sizeof(struct stats));
703                 del_timer_sync(&priv->stats_report_timer);
704         }
705         return 0;
706 }
707
708 static int gve_get_link_ksettings(struct net_device *netdev,
709                                   struct ethtool_link_ksettings *cmd)
710 {
711         struct gve_priv *priv = netdev_priv(netdev);
712         int err = 0;
713
714         if (priv->link_speed == 0)
715                 err = gve_adminq_report_link_speed(priv);
716
717         cmd->base.speed = priv->link_speed;
718
719         cmd->base.duplex = DUPLEX_FULL;
720
721         return err;
722 }
723
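/* Interrupt coalescing (ITR) is only configurable with the DQO queue
 * format; on GQI these handlers return -EOPNOTSUPP. The setter below
 * reprograms every TX/RX notify block whose value changed.
 */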
724 static int gve_get_coalesce(struct net_device *netdev,
725                             struct ethtool_coalesce *ec,
726                             struct kernel_ethtool_coalesce *kernel_ec,
727                             struct netlink_ext_ack *extack)
728 {
729         struct gve_priv *priv = netdev_priv(netdev);
730
731         if (gve_is_gqi(priv))
732                 return -EOPNOTSUPP;
733         ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
734         ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;
735
736         return 0;
737 }
738
739 static int gve_set_coalesce(struct net_device *netdev,
740                             struct ethtool_coalesce *ec,
741                             struct kernel_ethtool_coalesce *kernel_ec,
742                             struct netlink_ext_ack *extack)
743 {
744         struct gve_priv *priv = netdev_priv(netdev);
745         u32 tx_usecs_orig = priv->tx_coalesce_usecs;
746         u32 rx_usecs_orig = priv->rx_coalesce_usecs;
747         int idx;
748
749         if (gve_is_gqi(priv))
750                 return -EOPNOTSUPP;
751
752         if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
753             ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
754                 return -EINVAL;
755         priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
756         priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
757
758         if (tx_usecs_orig != priv->tx_coalesce_usecs) {
759                 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
760                         int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
761                         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
762
763                         gve_set_itr_coalesce_usecs_dqo(priv, block,
764                                                        priv->tx_coalesce_usecs);
765                 }
766         }
767
768         if (rx_usecs_orig != priv->rx_coalesce_usecs) {
769                 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
770                         int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
771                         struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
772
773                         gve_set_itr_coalesce_usecs_dqo(priv, block,
774                                                        priv->rx_coalesce_usecs);
775                 }
776         }
777
778         return 0;
779 }
780
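/* ethtool -N handler: insert and delete flow steering rules. Rule changes
 * require the ntuple feature (NETIF_F_NTUPLE) to be enabled on the device.
 */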
781 static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
782 {
783         struct gve_priv *priv = netdev_priv(netdev);
784         int err = 0;
785
786         if (!(netdev->features & NETIF_F_NTUPLE))
787                 return -EOPNOTSUPP;
788
789         switch (cmd->cmd) {
790         case ETHTOOL_SRXCLSRLINS:
791                 err = gve_add_flow_rule(priv, cmd);
792                 break;
793         case ETHTOOL_SRXCLSRLDEL:
794                 err = gve_del_flow_rule(priv, cmd);
795                 break;
796         case ETHTOOL_SRXFH:
797                 err = -EOPNOTSUPP;
798                 break;
799         default:
800                 err = -EOPNOTSUPP;
801                 break;
802         }
803
804         return err;
805 }
806
807 static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
808 {
809         struct gve_priv *priv = netdev_priv(netdev);
810         int err = 0;
811
812         switch (cmd->cmd) {
813         case ETHTOOL_GRXRINGS:
814                 cmd->data = priv->rx_cfg.num_queues;
815                 break;
816         case ETHTOOL_GRXCLSRLCNT:
817                 if (!priv->max_flow_rules)
818                         return -EOPNOTSUPP;
819
820                 err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
821                 if (err)
822                         return err;
823
824                 cmd->rule_cnt = priv->num_flow_rules;
825                 cmd->data = priv->max_flow_rules;
826                 break;
827         case ETHTOOL_GRXCLSRULE:
828                 err = gve_get_flow_rule_entry(priv, cmd);
829                 break;
830         case ETHTOOL_GRXCLSRLALL:
831                 err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
832                 break;
833         case ETHTOOL_GRXFH:
834                 err = -EOPNOTSUPP;
835                 break;
836         default:
837                 err = -EOPNOTSUPP;
838                 break;
839         }
840
841         return err;
842 }
843
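/* RSS configuration. The hash key and indirection table are read and
 * written through adminq commands; RSS is unsupported when the device
 * reports a zero key or LUT size.
 */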
844 static u32 gve_get_rxfh_key_size(struct net_device *netdev)
845 {
846         struct gve_priv *priv = netdev_priv(netdev);
847
848         return priv->rss_key_size;
849 }
850
851 static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
852 {
853         struct gve_priv *priv = netdev_priv(netdev);
854
855         return priv->rss_lut_size;
856 }
857
858 static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
859 {
860         struct gve_priv *priv = netdev_priv(netdev);
861
862         if (!priv->rss_key_size || !priv->rss_lut_size)
863                 return -EOPNOTSUPP;
864
865         return gve_adminq_query_rss_config(priv, rxfh);
866 }
867
868 static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
869                         struct netlink_ext_ack *extack)
870 {
871         struct gve_priv *priv = netdev_priv(netdev);
872
873         if (!priv->rss_key_size || !priv->rss_lut_size)
874                 return -EOPNOTSUPP;
875
876         return gve_adminq_configure_rss(priv, rxfh);
877 }
878
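/* For reference, a sketch of how these ops are typically exercised from
 * userspace (the interface name "eth0" is only an example):
 *
 *   ethtool -i eth0                            # get_drvinfo
 *   ethtool -S eth0                            # get_strings / get_ethtool_stats
 *   ethtool -l eth0 / -L eth0 rx 8 tx 8        # get/set_channels
 *   ethtool -g eth0 / -G eth0 rx 1024 tx 1024  # get/set_ringparam
 *   ethtool -c eth0 / -C eth0 rx-usecs 20      # get/set_coalesce (DQO only)
 *   ethtool -x eth0 / -X eth0 hkey ...         # get/set_rxfh
 *   ethtool --show-priv-flags eth0 / --set-priv-flags eth0 report-stats on
 */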
879 const struct ethtool_ops gve_ethtool_ops = {
880         .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
881         .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
882         .get_drvinfo = gve_get_drvinfo,
883         .get_strings = gve_get_strings,
884         .get_sset_count = gve_get_sset_count,
885         .get_ethtool_stats = gve_get_ethtool_stats,
886         .set_msglevel = gve_set_msglevel,
887         .get_msglevel = gve_get_msglevel,
888         .set_channels = gve_set_channels,
889         .get_channels = gve_get_channels,
890         .set_rxnfc = gve_set_rxnfc,
891         .get_rxnfc = gve_get_rxnfc,
892         .get_rxfh_indir_size = gve_get_rxfh_indir_size,
893         .get_rxfh_key_size = gve_get_rxfh_key_size,
894         .get_rxfh = gve_get_rxfh,
895         .set_rxfh = gve_set_rxfh,
896         .get_link = ethtool_op_get_link,
897         .get_coalesce = gve_get_coalesce,
898         .set_coalesce = gve_set_coalesce,
899         .get_ringparam = gve_get_ringparam,
900         .set_ringparam = gve_set_ringparam,
901         .reset = gve_user_reset,
902         .get_tunable = gve_get_tunable,
903         .set_tunable = gve_set_tunable,
904         .get_priv_flags = gve_get_priv_flags,
905         .set_priv_flags = gve_set_priv_flags,
906         .get_link_ksettings = gve_get_link_ksettings,
907         .get_ts_info = ethtool_op_get_ts_info,
908 };