]> Git Repo - linux.git/blob - drivers/net/virtio_net.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
[linux.git] / drivers / net / virtio_net.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* A network driver using virtio.
3  *
4  * Copyright 2007 Rusty Russell <[email protected]> IBM Corporation
5  */
6 //#define DEBUG
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/module.h>
11 #include <linux/virtio.h>
12 #include <linux/virtio_net.h>
13 #include <linux/bpf.h>
14 #include <linux/bpf_trace.h>
15 #include <linux/scatterlist.h>
16 #include <linux/if_vlan.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/average.h>
20 #include <linux/filter.h>
21 #include <linux/kernel.h>
22 #include <linux/dim.h>
23 #include <net/route.h>
24 #include <net/xdp.h>
25 #include <net/net_failover.h>
26 #include <net/netdev_rx_queue.h>
27 #include <net/netdev_queues.h>
28
29 static int napi_weight = NAPI_POLL_WEIGHT;
30 module_param(napi_weight, int, 0444);
31
32 static bool csum = true, gso = true, napi_tx = true;
33 module_param(csum, bool, 0444);
34 module_param(gso, bool, 0444);
35 module_param(napi_tx, bool, 0644);
36
37 /* FIXME: MTU in config. */
38 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
39 #define GOOD_COPY_LEN   128
40
41 #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
42
43 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
44 #define VIRTIO_XDP_HEADROOM 256
45
46 /* Separating two types of XDP xmit */
47 #define VIRTIO_XDP_TX           BIT(0)
48 #define VIRTIO_XDP_REDIR        BIT(1)
49
50 #define VIRTIO_XDP_FLAG BIT(0)
51
52 /* RX packet size EWMA. The average packet size is used to determine the packet
53  * buffer size when refilling RX rings. As the entire RX ring may be refilled
54  * at once, the weight is chosen so that the EWMA will be insensitive to short-
55  * term, transient changes in packet size.
56  */
57 DECLARE_EWMA(pkt_len, 0, 64)
58
59 #define VIRTNET_DRIVER_VERSION "1.0.0"
60
61 static const unsigned long guest_offloads[] = {
62         VIRTIO_NET_F_GUEST_TSO4,
63         VIRTIO_NET_F_GUEST_TSO6,
64         VIRTIO_NET_F_GUEST_ECN,
65         VIRTIO_NET_F_GUEST_UFO,
66         VIRTIO_NET_F_GUEST_CSUM,
67         VIRTIO_NET_F_GUEST_USO4,
68         VIRTIO_NET_F_GUEST_USO6,
69         VIRTIO_NET_F_GUEST_HDRLEN
70 };
71
72 #define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
73                                 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
74                                 (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
75                                 (1ULL << VIRTIO_NET_F_GUEST_UFO)  | \
76                                 (1ULL << VIRTIO_NET_F_GUEST_USO4) | \
77                                 (1ULL << VIRTIO_NET_F_GUEST_USO6))
78
79 struct virtnet_stat_desc {
80         char desc[ETH_GSTRING_LEN];
81         size_t offset;
82         size_t qstat_offset;
83 };
84
85 struct virtnet_sq_free_stats {
86         u64 packets;
87         u64 bytes;
88 };
89
90 struct virtnet_sq_stats {
91         struct u64_stats_sync syncp;
92         u64_stats_t packets;
93         u64_stats_t bytes;
94         u64_stats_t xdp_tx;
95         u64_stats_t xdp_tx_drops;
96         u64_stats_t kicks;
97         u64_stats_t tx_timeouts;
98         u64_stats_t stop;
99         u64_stats_t wake;
100 };
101
102 struct virtnet_rq_stats {
103         struct u64_stats_sync syncp;
104         u64_stats_t packets;
105         u64_stats_t bytes;
106         u64_stats_t drops;
107         u64_stats_t xdp_packets;
108         u64_stats_t xdp_tx;
109         u64_stats_t xdp_redirects;
110         u64_stats_t xdp_drops;
111         u64_stats_t kicks;
112 };
113
114 #define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
115 #define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
116
117 #define VIRTNET_SQ_STAT_QSTAT(name, m)                          \
118         {                                                       \
119                 name,                                           \
120                 offsetof(struct virtnet_sq_stats, m),           \
121                 offsetof(struct netdev_queue_stats_tx, m),      \
122         }
123
124 #define VIRTNET_RQ_STAT_QSTAT(name, m)                          \
125         {                                                       \
126                 name,                                           \
127                 offsetof(struct virtnet_rq_stats, m),           \
128                 offsetof(struct netdev_queue_stats_rx, m),      \
129         }
130
131 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
132         VIRTNET_SQ_STAT("xdp_tx",       xdp_tx),
133         VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
134         VIRTNET_SQ_STAT("kicks",        kicks),
135         VIRTNET_SQ_STAT("tx_timeouts",  tx_timeouts),
136 };
137
138 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
139         VIRTNET_RQ_STAT("drops",         drops),
140         VIRTNET_RQ_STAT("xdp_packets",   xdp_packets),
141         VIRTNET_RQ_STAT("xdp_tx",        xdp_tx),
142         VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
143         VIRTNET_RQ_STAT("xdp_drops",     xdp_drops),
144         VIRTNET_RQ_STAT("kicks",         kicks),
145 };
146
147 static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
148         VIRTNET_SQ_STAT_QSTAT("packets", packets),
149         VIRTNET_SQ_STAT_QSTAT("bytes",   bytes),
150         VIRTNET_SQ_STAT_QSTAT("stop",    stop),
151         VIRTNET_SQ_STAT_QSTAT("wake",    wake),
152 };
153
154 static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
155         VIRTNET_RQ_STAT_QSTAT("packets", packets),
156         VIRTNET_RQ_STAT_QSTAT("bytes",   bytes),
157 };
158
159 #define VIRTNET_STATS_DESC_CQ(name) \
160         {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
161
162 #define VIRTNET_STATS_DESC_RX(class, name) \
163         {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
164
165 #define VIRTNET_STATS_DESC_TX(class, name) \
166         {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
167
168
169 static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
170         VIRTNET_STATS_DESC_CQ(command_num),
171         VIRTNET_STATS_DESC_CQ(ok_num),
172 };
173
174 static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
175         VIRTNET_STATS_DESC_RX(basic, packets),
176         VIRTNET_STATS_DESC_RX(basic, bytes),
177
178         VIRTNET_STATS_DESC_RX(basic, notifications),
179         VIRTNET_STATS_DESC_RX(basic, interrupts),
180 };
181
182 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
183         VIRTNET_STATS_DESC_TX(basic, packets),
184         VIRTNET_STATS_DESC_TX(basic, bytes),
185
186         VIRTNET_STATS_DESC_TX(basic, notifications),
187         VIRTNET_STATS_DESC_TX(basic, interrupts),
188 };
189
190 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
191         VIRTNET_STATS_DESC_RX(csum, needs_csum),
192 };
193
194 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
195         VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
196         VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
197 };
198
199 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
200         VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
201 };
202
203 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
204         VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
205 };
206
207 #define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field)                   \
208         {                                                                       \
209                 #name,                                                          \
210                 offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name),    \
211                 offsetof(struct netdev_queue_stats_rx, qstat_field),            \
212         }
213
214 #define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field)                   \
215         {                                                                       \
216                 #name,                                                          \
217                 offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name),    \
218                 offsetof(struct netdev_queue_stats_tx, qstat_field),            \
219         }
220
221 static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
222         VIRTNET_STATS_DESC_RX_QSTAT(basic, drops,         hw_drops),
223         VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
224 };
225
226 static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
227         VIRTNET_STATS_DESC_TX_QSTAT(basic, drops,          hw_drops),
228         VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
229 };
230
231 static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
232         VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
233         VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none,  csum_none),
234         VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad,   csum_bad),
235 };
236
237 static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
238         VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none,  csum_none),
239         VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
240 };
241
242 static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
243         VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets,           hw_gro_packets),
244         VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes,             hw_gro_bytes),
245         VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
246         VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced,   hw_gro_wire_bytes),
247 };
248
249 static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
250         VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets,        hw_gso_packets),
251         VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes,          hw_gso_bytes),
252         VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments,       hw_gso_wire_packets),
253         VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
254 };
255
256 static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
257         VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
258 };
259
260 static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
261         VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
262 };
263
264 #define VIRTNET_Q_TYPE_RX 0
265 #define VIRTNET_Q_TYPE_TX 1
266 #define VIRTNET_Q_TYPE_CQ 2
267
268 struct virtnet_interrupt_coalesce {
269         u32 max_packets;
270         u32 max_usecs;
271 };
272
273 /* The dma information of pages allocated at a time. */
274 struct virtnet_rq_dma {
275         dma_addr_t addr;
276         u32 ref;
277         u16 len;
278         u16 need_sync;
279 };
280
281 /* Internal representation of a send virtqueue */
282 struct send_queue {
283         /* Virtqueue associated with this send _queue */
284         struct virtqueue *vq;
285
286         /* TX: fragments + linear part + virtio header */
287         struct scatterlist sg[MAX_SKB_FRAGS + 2];
288
289         /* Name of the send queue: output.$index */
290         char name[16];
291
292         struct virtnet_sq_stats stats;
293
294         struct virtnet_interrupt_coalesce intr_coal;
295
296         struct napi_struct napi;
297
298         /* Record whether sq is in reset state. */
299         bool reset;
300 };
301
302 /* Internal representation of a receive virtqueue */
303 struct receive_queue {
304         /* Virtqueue associated with this receive_queue */
305         struct virtqueue *vq;
306
307         struct napi_struct napi;
308
309         struct bpf_prog __rcu *xdp_prog;
310
311         struct virtnet_rq_stats stats;
312
313         /* The number of rx notifications */
314         u16 calls;
315
316         /* Is dynamic interrupt moderation enabled? */
317         bool dim_enabled;
318
319         /* Used to protect dim_enabled and inter_coal */
320         struct mutex dim_lock;
321
322         /* Dynamic Interrupt Moderation */
323         struct dim dim;
324
325         u32 packets_in_napi;
326
327         struct virtnet_interrupt_coalesce intr_coal;
328
329         /* Chain pages by the private ptr. */
330         struct page *pages;
331
332         /* Average packet length for mergeable receive buffers. */
333         struct ewma_pkt_len mrg_avg_pkt_len;
334
335         /* Page frag for packet buffer allocation. */
336         struct page_frag alloc_frag;
337
338         /* RX: fragments + linear part + virtio header */
339         struct scatterlist sg[MAX_SKB_FRAGS + 2];
340
341         /* Min single buffer size for mergeable buffers case. */
342         unsigned int min_buf_len;
343
344         /* Name of this receive queue: input.$index */
345         char name[16];
346
347         struct xdp_rxq_info xdp_rxq;
348
349         /* Record the last dma info to free after new pages is allocated. */
350         struct virtnet_rq_dma *last_dma;
351 };
352
353 /* This structure can contain rss message with maximum settings for indirection table and keysize
354  * Note, that default structure that describes RSS configuration virtio_net_rss_config
355  * contains same info but can't handle table values.
356  * In any case, structure would be passed to virtio hw through sg_buf split by parts
357  * because table sizes may be differ according to the device configuration.
358  */
359 #define VIRTIO_NET_RSS_MAX_KEY_SIZE     40
360 #define VIRTIO_NET_RSS_MAX_TABLE_LEN    128
361 struct virtio_net_ctrl_rss {
362         u32 hash_types;
363         u16 indirection_table_mask;
364         u16 unclassified_queue;
365         u16 indirection_table[VIRTIO_NET_RSS_MAX_TABLE_LEN];
366         u16 max_tx_vq;
367         u8 hash_key_length;
368         u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
369 };
370
371 /* Control VQ buffers: protected by the rtnl lock */
372 struct control_buf {
373         struct virtio_net_ctrl_hdr hdr;
374         virtio_net_ctrl_ack status;
375 };
376
377 struct virtnet_info {
378         struct virtio_device *vdev;
379         struct virtqueue *cvq;
380         struct net_device *dev;
381         struct send_queue *sq;
382         struct receive_queue *rq;
383         unsigned int status;
384
385         /* Max # of queue pairs supported by the device */
386         u16 max_queue_pairs;
387
388         /* # of queue pairs currently used by the driver */
389         u16 curr_queue_pairs;
390
391         /* # of XDP queue pairs currently used by the driver */
392         u16 xdp_queue_pairs;
393
394         /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
395         bool xdp_enabled;
396
397         /* I like... big packets and I cannot lie! */
398         bool big_packets;
399
400         /* number of sg entries allocated for big packets */
401         unsigned int big_packets_num_skbfrags;
402
403         /* Host will merge rx buffers for big packets (shake it! shake it!) */
404         bool mergeable_rx_bufs;
405
406         /* Host supports rss and/or hash report */
407         bool has_rss;
408         bool has_rss_hash_report;
409         u8 rss_key_size;
410         u16 rss_indir_table_size;
411         u32 rss_hash_types_supported;
412         u32 rss_hash_types_saved;
413         struct virtio_net_ctrl_rss rss;
414
415         /* Has control virtqueue */
416         bool has_cvq;
417
418         /* Lock to protect the control VQ */
419         struct mutex cvq_lock;
420
421         /* Host can handle any s/g split between our header and packet data */
422         bool any_header_sg;
423
424         /* Packet virtio header size */
425         u8 hdr_len;
426
427         /* Work struct for delayed refilling if we run low on memory. */
428         struct delayed_work refill;
429
430         /* Is delayed refill enabled? */
431         bool refill_enabled;
432
433         /* The lock to synchronize the access to refill_enabled */
434         spinlock_t refill_lock;
435
436         /* Work struct for config space updates */
437         struct work_struct config_work;
438
439         /* Work struct for setting rx mode */
440         struct work_struct rx_mode_work;
441
442         /* OK to queue work setting RX mode? */
443         bool rx_mode_work_enabled;
444
445         /* Does the affinity hint is set for virtqueues? */
446         bool affinity_hint_set;
447
448         /* CPU hotplug instances for online & dead */
449         struct hlist_node node;
450         struct hlist_node node_dead;
451
452         struct control_buf *ctrl;
453
454         /* Ethtool settings */
455         u8 duplex;
456         u32 speed;
457
458         /* Is rx dynamic interrupt moderation enabled? */
459         bool rx_dim_enabled;
460
461         /* Interrupt coalescing settings */
462         struct virtnet_interrupt_coalesce intr_coal_tx;
463         struct virtnet_interrupt_coalesce intr_coal_rx;
464
465         unsigned long guest_offloads;
466         unsigned long guest_offloads_capable;
467
468         /* failover when STANDBY feature enabled */
469         struct failover *failover;
470
471         u64 device_stats_cap;
472 };
473
474 struct padded_vnet_hdr {
475         struct virtio_net_hdr_v1_hash hdr;
476         /*
477          * hdr is in a separate sg buffer, and data sg buffer shares same page
478          * with this header sg. This padding makes next sg 16 byte aligned
479          * after the header.
480          */
481         char padding[12];
482 };
483
484 struct virtio_net_common_hdr {
485         union {
486                 struct virtio_net_hdr hdr;
487                 struct virtio_net_hdr_mrg_rxbuf mrg_hdr;
488                 struct virtio_net_hdr_v1_hash hash_v1_hdr;
489         };
490 };
491
492 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
493
494 static bool is_xdp_frame(void *ptr)
495 {
496         return (unsigned long)ptr & VIRTIO_XDP_FLAG;
497 }
498
499 static void *xdp_to_ptr(struct xdp_frame *ptr)
500 {
501         return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
502 }
503
504 static struct xdp_frame *ptr_to_xdp(void *ptr)
505 {
506         return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
507 }
508
509 static void __free_old_xmit(struct send_queue *sq, bool in_napi,
510                             struct virtnet_sq_free_stats *stats)
511 {
512         unsigned int len;
513         void *ptr;
514
515         while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
516                 ++stats->packets;
517
518                 if (!is_xdp_frame(ptr)) {
519                         struct sk_buff *skb = ptr;
520
521                         pr_debug("Sent skb %p\n", skb);
522
523                         stats->bytes += skb->len;
524                         napi_consume_skb(skb, in_napi);
525                 } else {
526                         struct xdp_frame *frame = ptr_to_xdp(ptr);
527
528                         stats->bytes += xdp_get_frame_len(frame);
529                         xdp_return_frame(frame);
530                 }
531         }
532 }
533
534 /* Converting between virtqueue no. and kernel tx/rx queue no.
535  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
536  */
537 static int vq2txq(struct virtqueue *vq)
538 {
539         return (vq->index - 1) / 2;
540 }
541
542 static int txq2vq(int txq)
543 {
544         return txq * 2 + 1;
545 }
546
547 static int vq2rxq(struct virtqueue *vq)
548 {
549         return vq->index / 2;
550 }
551
552 static int rxq2vq(int rxq)
553 {
554         return rxq * 2;
555 }
556
557 static int vq_type(struct virtnet_info *vi, int qid)
558 {
559         if (qid == vi->max_queue_pairs * 2)
560                 return VIRTNET_Q_TYPE_CQ;
561
562         if (qid % 2)
563                 return VIRTNET_Q_TYPE_TX;
564
565         return VIRTNET_Q_TYPE_RX;
566 }
567
568 static inline struct virtio_net_common_hdr *
569 skb_vnet_common_hdr(struct sk_buff *skb)
570 {
571         return (struct virtio_net_common_hdr *)skb->cb;
572 }
573
574 /*
575  * private is used to chain pages for big packets, put the whole
576  * most recent used list in the beginning for reuse
577  */
578 static void give_pages(struct receive_queue *rq, struct page *page)
579 {
580         struct page *end;
581
582         /* Find end of list, sew whole thing into vi->rq.pages. */
583         for (end = page; end->private; end = (struct page *)end->private);
584         end->private = (unsigned long)rq->pages;
585         rq->pages = page;
586 }
587
588 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
589 {
590         struct page *p = rq->pages;
591
592         if (p) {
593                 rq->pages = (struct page *)p->private;
594                 /* clear private here, it is used to chain pages */
595                 p->private = 0;
596         } else
597                 p = alloc_page(gfp_mask);
598         return p;
599 }
600
601 static void virtnet_rq_free_buf(struct virtnet_info *vi,
602                                 struct receive_queue *rq, void *buf)
603 {
604         if (vi->mergeable_rx_bufs)
605                 put_page(virt_to_head_page(buf));
606         else if (vi->big_packets)
607                 give_pages(rq, buf);
608         else
609                 put_page(virt_to_head_page(buf));
610 }
611
612 static void enable_delayed_refill(struct virtnet_info *vi)
613 {
614         spin_lock_bh(&vi->refill_lock);
615         vi->refill_enabled = true;
616         spin_unlock_bh(&vi->refill_lock);
617 }
618
619 static void disable_delayed_refill(struct virtnet_info *vi)
620 {
621         spin_lock_bh(&vi->refill_lock);
622         vi->refill_enabled = false;
623         spin_unlock_bh(&vi->refill_lock);
624 }
625
626 static void enable_rx_mode_work(struct virtnet_info *vi)
627 {
628         rtnl_lock();
629         vi->rx_mode_work_enabled = true;
630         rtnl_unlock();
631 }
632
633 static void disable_rx_mode_work(struct virtnet_info *vi)
634 {
635         rtnl_lock();
636         vi->rx_mode_work_enabled = false;
637         rtnl_unlock();
638 }
639
640 static void virtqueue_napi_schedule(struct napi_struct *napi,
641                                     struct virtqueue *vq)
642 {
643         if (napi_schedule_prep(napi)) {
644                 virtqueue_disable_cb(vq);
645                 __napi_schedule(napi);
646         }
647 }
648
649 static bool virtqueue_napi_complete(struct napi_struct *napi,
650                                     struct virtqueue *vq, int processed)
651 {
652         int opaque;
653
654         opaque = virtqueue_enable_cb_prepare(vq);
655         if (napi_complete_done(napi, processed)) {
656                 if (unlikely(virtqueue_poll(vq, opaque)))
657                         virtqueue_napi_schedule(napi, vq);
658                 else
659                         return true;
660         } else {
661                 virtqueue_disable_cb(vq);
662         }
663
664         return false;
665 }
666
667 static void skb_xmit_done(struct virtqueue *vq)
668 {
669         struct virtnet_info *vi = vq->vdev->priv;
670         struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
671
672         /* Suppress further interrupts. */
673         virtqueue_disable_cb(vq);
674
675         if (napi->weight)
676                 virtqueue_napi_schedule(napi, vq);
677         else
678                 /* We were probably waiting for more output buffers. */
679                 netif_wake_subqueue(vi->dev, vq2txq(vq));
680 }
681
682 #define MRG_CTX_HEADER_SHIFT 22
683 static void *mergeable_len_to_ctx(unsigned int truesize,
684                                   unsigned int headroom)
685 {
686         return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
687 }
688
689 static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
690 {
691         return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
692 }
693
694 static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
695 {
696         return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
697 }
698
699 static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen,
700                                          unsigned int headroom,
701                                          unsigned int len)
702 {
703         struct sk_buff *skb;
704
705         skb = build_skb(buf, buflen);
706         if (unlikely(!skb))
707                 return NULL;
708
709         skb_reserve(skb, headroom);
710         skb_put(skb, len);
711
712         return skb;
713 }
714
715 /* Called from bottom half context */
716 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
717                                    struct receive_queue *rq,
718                                    struct page *page, unsigned int offset,
719                                    unsigned int len, unsigned int truesize,
720                                    unsigned int headroom)
721 {
722         struct sk_buff *skb;
723         struct virtio_net_common_hdr *hdr;
724         unsigned int copy, hdr_len, hdr_padded_len;
725         struct page *page_to_free = NULL;
726         int tailroom, shinfo_size;
727         char *p, *hdr_p, *buf;
728
729         p = page_address(page) + offset;
730         hdr_p = p;
731
732         hdr_len = vi->hdr_len;
733         if (vi->mergeable_rx_bufs)
734                 hdr_padded_len = hdr_len;
735         else
736                 hdr_padded_len = sizeof(struct padded_vnet_hdr);
737
738         buf = p - headroom;
739         len -= hdr_len;
740         offset += hdr_padded_len;
741         p += hdr_padded_len;
742         tailroom = truesize - headroom  - hdr_padded_len - len;
743
744         shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
745
746         if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
747                 skb = virtnet_build_skb(buf, truesize, p - buf, len);
748                 if (unlikely(!skb))
749                         return NULL;
750
751                 page = (struct page *)page->private;
752                 if (page)
753                         give_pages(rq, page);
754                 goto ok;
755         }
756
757         /* copy small packet so we can reuse these pages for small data */
758         skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
759         if (unlikely(!skb))
760                 return NULL;
761
762         /* Copy all frame if it fits skb->head, otherwise
763          * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
764          */
765         if (len <= skb_tailroom(skb))
766                 copy = len;
767         else
768                 copy = ETH_HLEN;
769         skb_put_data(skb, p, copy);
770
771         len -= copy;
772         offset += copy;
773
774         if (vi->mergeable_rx_bufs) {
775                 if (len)
776                         skb_add_rx_frag(skb, 0, page, offset, len, truesize);
777                 else
778                         page_to_free = page;
779                 goto ok;
780         }
781
782         /*
783          * Verify that we can indeed put this data into a skb.
784          * This is here to handle cases when the device erroneously
785          * tries to receive more than is possible. This is usually
786          * the case of a broken device.
787          */
788         if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
789                 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
790                 dev_kfree_skb(skb);
791                 return NULL;
792         }
793         BUG_ON(offset >= PAGE_SIZE);
794         while (len) {
795                 unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
796                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
797                                 frag_size, truesize);
798                 len -= frag_size;
799                 page = (struct page *)page->private;
800                 offset = 0;
801         }
802
803         if (page)
804                 give_pages(rq, page);
805
806 ok:
807         hdr = skb_vnet_common_hdr(skb);
808         memcpy(hdr, hdr_p, hdr_len);
809         if (page_to_free)
810                 put_page(page_to_free);
811
812         return skb;
813 }
814
815 static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
816 {
817         struct page *page = virt_to_head_page(buf);
818         struct virtnet_rq_dma *dma;
819         void *head;
820         int offset;
821
822         head = page_address(page);
823
824         dma = head;
825
826         --dma->ref;
827
828         if (dma->need_sync && len) {
829                 offset = buf - (head + sizeof(*dma));
830
831                 virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
832                                                         offset, len,
833                                                         DMA_FROM_DEVICE);
834         }
835
836         if (dma->ref)
837                 return;
838
839         virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
840                                          DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
841         put_page(page);
842 }
843
844 static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
845 {
846         void *buf;
847
848         buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
849         if (buf)
850                 virtnet_rq_unmap(rq, buf, *len);
851
852         return buf;
853 }
854
855 static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
856 {
857         struct virtnet_rq_dma *dma;
858         dma_addr_t addr;
859         u32 offset;
860         void *head;
861
862         head = page_address(rq->alloc_frag.page);
863
864         offset = buf - head;
865
866         dma = head;
867
868         addr = dma->addr - sizeof(*dma) + offset;
869
870         sg_init_table(rq->sg, 1);
871         rq->sg[0].dma_address = addr;
872         rq->sg[0].length = len;
873 }
874
875 static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
876 {
877         struct page_frag *alloc_frag = &rq->alloc_frag;
878         struct virtnet_rq_dma *dma;
879         void *buf, *head;
880         dma_addr_t addr;
881
882         if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
883                 return NULL;
884
885         head = page_address(alloc_frag->page);
886
887         dma = head;
888
889         /* new pages */
890         if (!alloc_frag->offset) {
891                 if (rq->last_dma) {
892                         /* Now, the new page is allocated, the last dma
893                          * will not be used. So the dma can be unmapped
894                          * if the ref is 0.
895                          */
896                         virtnet_rq_unmap(rq, rq->last_dma, 0);
897                         rq->last_dma = NULL;
898                 }
899
900                 dma->len = alloc_frag->size - sizeof(*dma);
901
902                 addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
903                                                       dma->len, DMA_FROM_DEVICE, 0);
904                 if (virtqueue_dma_mapping_error(rq->vq, addr))
905                         return NULL;
906
907                 dma->addr = addr;
908                 dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
909
910                 /* Add a reference to dma to prevent the entire dma from
911                  * being released during error handling. This reference
912                  * will be freed after the pages are no longer used.
913                  */
914                 get_page(alloc_frag->page);
915                 dma->ref = 1;
916                 alloc_frag->offset = sizeof(*dma);
917
918                 rq->last_dma = dma;
919         }
920
921         ++dma->ref;
922
923         buf = head + alloc_frag->offset;
924
925         get_page(alloc_frag->page);
926         alloc_frag->offset += size;
927
928         return buf;
929 }
930
931 static void virtnet_rq_set_premapped(struct virtnet_info *vi)
932 {
933         int i;
934
935         /* disable for big mode */
936         if (!vi->mergeable_rx_bufs && vi->big_packets)
937                 return;
938
939         for (i = 0; i < vi->max_queue_pairs; i++)
940                 /* error should never happen */
941                 BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
942 }
943
944 static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
945 {
946         struct virtnet_info *vi = vq->vdev->priv;
947         struct receive_queue *rq;
948         int i = vq2rxq(vq);
949
950         rq = &vi->rq[i];
951
952         if (!vi->big_packets || vi->mergeable_rx_bufs)
953                 virtnet_rq_unmap(rq, buf, 0);
954
955         virtnet_rq_free_buf(vi, rq, buf);
956 }
957
958 static void free_old_xmit(struct send_queue *sq, bool in_napi)
959 {
960         struct virtnet_sq_free_stats stats = {0};
961
962         __free_old_xmit(sq, in_napi, &stats);
963
964         /* Avoid overhead when no packets have been processed
965          * happens when called speculatively from start_xmit.
966          */
967         if (!stats.packets)
968                 return;
969
970         u64_stats_update_begin(&sq->stats.syncp);
971         u64_stats_add(&sq->stats.bytes, stats.bytes);
972         u64_stats_add(&sq->stats.packets, stats.packets);
973         u64_stats_update_end(&sq->stats.syncp);
974 }
975
976 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
977 {
978         if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
979                 return false;
980         else if (q < vi->curr_queue_pairs)
981                 return true;
982         else
983                 return false;
984 }
985
986 static void check_sq_full_and_disable(struct virtnet_info *vi,
987                                       struct net_device *dev,
988                                       struct send_queue *sq)
989 {
990         bool use_napi = sq->napi.weight;
991         int qnum;
992
993         qnum = sq - vi->sq;
994
995         /* If running out of space, stop queue to avoid getting packets that we
996          * are then unable to transmit.
997          * An alternative would be to force queuing layer to requeue the skb by
998          * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
999          * returned in a normal path of operation: it means that driver is not
1000          * maintaining the TX queue stop/start state properly, and causes
1001          * the stack to do a non-trivial amount of useless work.
1002          * Since most packets only take 1 or 2 ring slots, stopping the queue
1003          * early means 16 slots are typically wasted.
1004          */
1005         if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1006                 netif_stop_subqueue(dev, qnum);
1007                 u64_stats_update_begin(&sq->stats.syncp);
1008                 u64_stats_inc(&sq->stats.stop);
1009                 u64_stats_update_end(&sq->stats.syncp);
1010                 if (use_napi) {
1011                         if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
1012                                 virtqueue_napi_schedule(&sq->napi, sq->vq);
1013                 } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1014                         /* More just got used, free them then recheck. */
1015                         free_old_xmit(sq, false);
1016                         if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1017                                 netif_start_subqueue(dev, qnum);
1018                                 u64_stats_update_begin(&sq->stats.syncp);
1019                                 u64_stats_inc(&sq->stats.wake);
1020                                 u64_stats_update_end(&sq->stats.syncp);
1021                                 virtqueue_disable_cb(sq->vq);
1022                         }
1023                 }
1024         }
1025 }
1026
1027 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
1028                                    struct send_queue *sq,
1029                                    struct xdp_frame *xdpf)
1030 {
1031         struct virtio_net_hdr_mrg_rxbuf *hdr;
1032         struct skb_shared_info *shinfo;
1033         u8 nr_frags = 0;
1034         int err, i;
1035
1036         if (unlikely(xdpf->headroom < vi->hdr_len))
1037                 return -EOVERFLOW;
1038
1039         if (unlikely(xdp_frame_has_frags(xdpf))) {
1040                 shinfo = xdp_get_shared_info_from_frame(xdpf);
1041                 nr_frags = shinfo->nr_frags;
1042         }
1043
1044         /* In wrapping function virtnet_xdp_xmit(), we need to free
1045          * up the pending old buffers, where we need to calculate the
1046          * position of skb_shared_info in xdp_get_frame_len() and
1047          * xdp_return_frame(), which will involve to xdpf->data and
1048          * xdpf->headroom. Therefore, we need to update the value of
1049          * headroom synchronously here.
1050          */
1051         xdpf->headroom -= vi->hdr_len;
1052         xdpf->data -= vi->hdr_len;
1053         /* Zero header and leave csum up to XDP layers */
1054         hdr = xdpf->data;
1055         memset(hdr, 0, vi->hdr_len);
1056         xdpf->len   += vi->hdr_len;
1057
1058         sg_init_table(sq->sg, nr_frags + 1);
1059         sg_set_buf(sq->sg, xdpf->data, xdpf->len);
1060         for (i = 0; i < nr_frags; i++) {
1061                 skb_frag_t *frag = &shinfo->frags[i];
1062
1063                 sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
1064                             skb_frag_size(frag), skb_frag_off(frag));
1065         }
1066
1067         err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
1068                                    xdp_to_ptr(xdpf), GFP_ATOMIC);
1069         if (unlikely(err))
1070                 return -ENOSPC; /* Caller handle free/refcnt */
1071
1072         return 0;
1073 }
1074
1075 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1076  * the current cpu, so it does not need to be locked.
1077  *
1078  * Here we use marco instead of inline functions because we have to deal with
1079  * three issues at the same time: 1. the choice of sq. 2. judge and execute the
1080  * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
1081  * functions to perfectly solve these three problems at the same time.
1082  */
1083 #define virtnet_xdp_get_sq(vi) ({                                       \
1084         int cpu = smp_processor_id();                                   \
1085         struct netdev_queue *txq;                                       \
1086         typeof(vi) v = (vi);                                            \
1087         unsigned int qp;                                                \
1088                                                                         \
1089         if (v->curr_queue_pairs > nr_cpu_ids) {                         \
1090                 qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
1091                 qp += cpu;                                              \
1092                 txq = netdev_get_tx_queue(v->dev, qp);                  \
1093                 __netif_tx_acquire(txq);                                \
1094         } else {                                                        \
1095                 qp = cpu % v->curr_queue_pairs;                         \
1096                 txq = netdev_get_tx_queue(v->dev, qp);                  \
1097                 __netif_tx_lock(txq, cpu);                              \
1098         }                                                               \
1099         v->sq + qp;                                                     \
1100 })
1101
1102 #define virtnet_xdp_put_sq(vi, q) {                                     \
1103         struct netdev_queue *txq;                                       \
1104         typeof(vi) v = (vi);                                            \
1105                                                                         \
1106         txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
1107         if (v->curr_queue_pairs > nr_cpu_ids)                           \
1108                 __netif_tx_release(txq);                                \
1109         else                                                            \
1110                 __netif_tx_unlock(txq);                                 \
1111 }
1112
1113 static int virtnet_xdp_xmit(struct net_device *dev,
1114                             int n, struct xdp_frame **frames, u32 flags)
1115 {
1116         struct virtnet_info *vi = netdev_priv(dev);
1117         struct virtnet_sq_free_stats stats = {0};
1118         struct receive_queue *rq = vi->rq;
1119         struct bpf_prog *xdp_prog;
1120         struct send_queue *sq;
1121         int nxmit = 0;
1122         int kicks = 0;
1123         int ret;
1124         int i;
1125
1126         /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
1127          * indicate XDP resources have been successfully allocated.
1128          */
1129         xdp_prog = rcu_access_pointer(rq->xdp_prog);
1130         if (!xdp_prog)
1131                 return -ENXIO;
1132
1133         sq = virtnet_xdp_get_sq(vi);
1134
1135         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
1136                 ret = -EINVAL;
1137                 goto out;
1138         }
1139
1140         /* Free up any pending old buffers before queueing new ones. */
1141         __free_old_xmit(sq, false, &stats);
1142
1143         for (i = 0; i < n; i++) {
1144                 struct xdp_frame *xdpf = frames[i];
1145
1146                 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
1147                         break;
1148                 nxmit++;
1149         }
1150         ret = nxmit;
1151
1152         if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
1153                 check_sq_full_and_disable(vi, dev, sq);
1154
1155         if (flags & XDP_XMIT_FLUSH) {
1156                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
1157                         kicks = 1;
1158         }
1159 out:
1160         u64_stats_update_begin(&sq->stats.syncp);
1161         u64_stats_add(&sq->stats.bytes, stats.bytes);
1162         u64_stats_add(&sq->stats.packets, stats.packets);
1163         u64_stats_add(&sq->stats.xdp_tx, n);
1164         u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
1165         u64_stats_add(&sq->stats.kicks, kicks);
1166         u64_stats_update_end(&sq->stats.syncp);
1167
1168         virtnet_xdp_put_sq(vi, sq);
1169         return ret;
1170 }
1171
1172 static void put_xdp_frags(struct xdp_buff *xdp)
1173 {
1174         struct skb_shared_info *shinfo;
1175         struct page *xdp_page;
1176         int i;
1177
1178         if (xdp_buff_has_frags(xdp)) {
1179                 shinfo = xdp_get_shared_info_from_buff(xdp);
1180                 for (i = 0; i < shinfo->nr_frags; i++) {
1181                         xdp_page = skb_frag_page(&shinfo->frags[i]);
1182                         put_page(xdp_page);
1183                 }
1184         }
1185 }
1186
1187 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
1188                                struct net_device *dev,
1189                                unsigned int *xdp_xmit,
1190                                struct virtnet_rq_stats *stats)
1191 {
1192         struct xdp_frame *xdpf;
1193         int err;
1194         u32 act;
1195
1196         act = bpf_prog_run_xdp(xdp_prog, xdp);
1197         u64_stats_inc(&stats->xdp_packets);
1198
1199         switch (act) {
1200         case XDP_PASS:
1201                 return act;
1202
1203         case XDP_TX:
1204                 u64_stats_inc(&stats->xdp_tx);
1205                 xdpf = xdp_convert_buff_to_frame(xdp);
1206                 if (unlikely(!xdpf)) {
1207                         netdev_dbg(dev, "convert buff to frame failed for xdp\n");
1208                         return XDP_DROP;
1209                 }
1210
1211                 err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
1212                 if (unlikely(!err)) {
1213                         xdp_return_frame_rx_napi(xdpf);
1214                 } else if (unlikely(err < 0)) {
1215                         trace_xdp_exception(dev, xdp_prog, act);
1216                         return XDP_DROP;
1217                 }
1218                 *xdp_xmit |= VIRTIO_XDP_TX;
1219                 return act;
1220
1221         case XDP_REDIRECT:
1222                 u64_stats_inc(&stats->xdp_redirects);
1223                 err = xdp_do_redirect(dev, xdp, xdp_prog);
1224                 if (err)
1225                         return XDP_DROP;
1226
1227                 *xdp_xmit |= VIRTIO_XDP_REDIR;
1228                 return act;
1229
1230         default:
1231                 bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
1232                 fallthrough;
1233         case XDP_ABORTED:
1234                 trace_xdp_exception(dev, xdp_prog, act);
1235                 fallthrough;
1236         case XDP_DROP:
1237                 return XDP_DROP;
1238         }
1239 }
1240
1241 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1242 {
1243         return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1244 }
1245
1246 /* We copy the packet for XDP in the following cases:
1247  *
1248  * 1) Packet is scattered across multiple rx buffers.
1249  * 2) Headroom space is insufficient.
1250  *
1251  * This is inefficient but it's a temporary condition that
1252  * we hit right after XDP is enabled and until queue is refilled
1253  * with large buffers with sufficient headroom - so it should affect
1254  * at most queue size packets.
1255  * Afterwards, the conditions to enable
1256  * XDP should preclude the underlying device from sending packets
1257  * across multiple buffers (num_buf > 1), and we make sure buffers
1258  * have enough headroom.
1259  */
1260 static struct page *xdp_linearize_page(struct receive_queue *rq,
1261                                        int *num_buf,
1262                                        struct page *p,
1263                                        int offset,
1264                                        int page_off,
1265                                        unsigned int *len)
1266 {
1267         int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1268         struct page *page;
1269
1270         if (page_off + *len + tailroom > PAGE_SIZE)
1271                 return NULL;
1272
1273         page = alloc_page(GFP_ATOMIC);
1274         if (!page)
1275                 return NULL;
1276
1277         memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
1278         page_off += *len;
1279
1280         while (--*num_buf) {
1281                 unsigned int buflen;
1282                 void *buf;
1283                 int off;
1284
1285                 buf = virtnet_rq_get_buf(rq, &buflen, NULL);
1286                 if (unlikely(!buf))
1287                         goto err_buf;
1288
1289                 p = virt_to_head_page(buf);
1290                 off = buf - page_address(p);
1291
1292                 /* guard against a misconfigured or uncooperative backend that
1293                  * is sending packet larger than the MTU.
1294                  */
1295                 if ((page_off + buflen + tailroom) > PAGE_SIZE) {
1296                         put_page(p);
1297                         goto err_buf;
1298                 }
1299
1300                 memcpy(page_address(page) + page_off,
1301                        page_address(p) + off, buflen);
1302                 page_off += buflen;
1303                 put_page(p);
1304         }
1305
1306         /* Headroom does not contribute to packet length */
1307         *len = page_off - VIRTIO_XDP_HEADROOM;
1308         return page;
1309 err_buf:
1310         __free_pages(page, 0);
1311         return NULL;
1312 }
1313
1314 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1315                                                unsigned int xdp_headroom,
1316                                                void *buf,
1317                                                unsigned int len)
1318 {
1319         unsigned int header_offset;
1320         unsigned int headroom;
1321         unsigned int buflen;
1322         struct sk_buff *skb;
1323
1324         header_offset = VIRTNET_RX_PAD + xdp_headroom;
1325         headroom = vi->hdr_len + header_offset;
1326         buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1327                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1328
1329         skb = virtnet_build_skb(buf, buflen, headroom, len);
1330         if (unlikely(!skb))
1331                 return NULL;
1332
1333         buf += header_offset;
1334         memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1335
1336         return skb;
1337 }
1338
1339 static struct sk_buff *receive_small_xdp(struct net_device *dev,
1340                                          struct virtnet_info *vi,
1341                                          struct receive_queue *rq,
1342                                          struct bpf_prog *xdp_prog,
1343                                          void *buf,
1344                                          unsigned int xdp_headroom,
1345                                          unsigned int len,
1346                                          unsigned int *xdp_xmit,
1347                                          struct virtnet_rq_stats *stats)
1348 {
1349         unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
1350         unsigned int headroom = vi->hdr_len + header_offset;
1351         struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
1352         struct page *page = virt_to_head_page(buf);
1353         struct page *xdp_page;
1354         unsigned int buflen;
1355         struct xdp_buff xdp;
1356         struct sk_buff *skb;
1357         unsigned int metasize = 0;
1358         u32 act;
1359
1360         if (unlikely(hdr->hdr.gso_type))
1361                 goto err_xdp;
1362
1363         buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1364                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1365
1366         if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1367                 int offset = buf - page_address(page) + header_offset;
1368                 unsigned int tlen = len + vi->hdr_len;
1369                 int num_buf = 1;
1370
1371                 xdp_headroom = virtnet_get_headroom(vi);
1372                 header_offset = VIRTNET_RX_PAD + xdp_headroom;
1373                 headroom = vi->hdr_len + header_offset;
1374                 buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
1375                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1376                 xdp_page = xdp_linearize_page(rq, &num_buf, page,
1377                                               offset, header_offset,
1378                                               &tlen);
1379                 if (!xdp_page)
1380                         goto err_xdp;
1381
1382                 buf = page_address(xdp_page);
1383                 put_page(page);
1384                 page = xdp_page;
1385         }
1386
1387         xdp_init_buff(&xdp, buflen, &rq->xdp_rxq);
1388         xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1389                          xdp_headroom, len, true);
1390
1391         act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1392
1393         switch (act) {
1394         case XDP_PASS:
1395                 /* Recalculate length in case bpf program changed it */
1396                 len = xdp.data_end - xdp.data;
1397                 metasize = xdp.data - xdp.data_meta;
1398                 break;
1399
1400         case XDP_TX:
1401         case XDP_REDIRECT:
1402                 goto xdp_xmit;
1403
1404         default:
1405                 goto err_xdp;
1406         }
1407
1408         skb = virtnet_build_skb(buf, buflen, xdp.data - buf, len);
1409         if (unlikely(!skb))
1410                 goto err;
1411
1412         if (metasize)
1413                 skb_metadata_set(skb, metasize);
1414
1415         return skb;
1416
1417 err_xdp:
1418         u64_stats_inc(&stats->xdp_drops);
1419 err:
1420         u64_stats_inc(&stats->drops);
1421         put_page(page);
1422 xdp_xmit:
1423         return NULL;
1424 }
1425
1426 static struct sk_buff *receive_small(struct net_device *dev,
1427                                      struct virtnet_info *vi,
1428                                      struct receive_queue *rq,
1429                                      void *buf, void *ctx,
1430                                      unsigned int len,
1431                                      unsigned int *xdp_xmit,
1432                                      struct virtnet_rq_stats *stats)
1433 {
1434         unsigned int xdp_headroom = (unsigned long)ctx;
1435         struct page *page = virt_to_head_page(buf);
1436         struct sk_buff *skb;
1437
1438         len -= vi->hdr_len;
1439         u64_stats_add(&stats->bytes, len);
1440
1441         if (unlikely(len > GOOD_PACKET_LEN)) {
1442                 pr_debug("%s: rx error: len %u exceeds max size %d\n",
1443                          dev->name, len, GOOD_PACKET_LEN);
1444                 DEV_STATS_INC(dev, rx_length_errors);
1445                 goto err;
1446         }
1447
1448         if (unlikely(vi->xdp_enabled)) {
1449                 struct bpf_prog *xdp_prog;
1450
1451                 rcu_read_lock();
1452                 xdp_prog = rcu_dereference(rq->xdp_prog);
1453                 if (xdp_prog) {
1454                         skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1455                                                 xdp_headroom, len, xdp_xmit,
1456                                                 stats);
1457                         rcu_read_unlock();
1458                         return skb;
1459                 }
1460                 rcu_read_unlock();
1461         }
1462
1463         skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
1464         if (likely(skb))
1465                 return skb;
1466
1467 err:
1468         u64_stats_inc(&stats->drops);
1469         put_page(page);
1470         return NULL;
1471 }
1472
1473 static struct sk_buff *receive_big(struct net_device *dev,
1474                                    struct virtnet_info *vi,
1475                                    struct receive_queue *rq,
1476                                    void *buf,
1477                                    unsigned int len,
1478                                    struct virtnet_rq_stats *stats)
1479 {
1480         struct page *page = buf;
1481         struct sk_buff *skb =
1482                 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
1483
1484         u64_stats_add(&stats->bytes, len - vi->hdr_len);
1485         if (unlikely(!skb))
1486                 goto err;
1487
1488         return skb;
1489
1490 err:
1491         u64_stats_inc(&stats->drops);
1492         give_pages(rq, page);
1493         return NULL;
1494 }
1495
1496 static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
1497                                struct net_device *dev,
1498                                struct virtnet_rq_stats *stats)
1499 {
1500         struct page *page;
1501         void *buf;
1502         int len;
1503
1504         while (num_buf-- > 1) {
1505                 buf = virtnet_rq_get_buf(rq, &len, NULL);
1506                 if (unlikely(!buf)) {
1507                         pr_debug("%s: rx error: %d buffers missing\n",
1508                                  dev->name, num_buf);
1509                         DEV_STATS_INC(dev, rx_length_errors);
1510                         break;
1511                 }
1512                 u64_stats_add(&stats->bytes, len);
1513                 page = virt_to_head_page(buf);
1514                 put_page(page);
1515         }
1516 }
1517
1518 /* Why not use xdp_build_skb_from_frame() ?
1519  * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
1520  * virtio-net there are 2 points that do not match its requirements:
1521  *  1. The size of the prefilled buffer is not fixed before xdp is set.
1522  *  2. xdp_build_skb_from_frame() does more checks that we don't need,
1523  *     like eth_type_trans() (which virtio-net does in receive_buf()).
1524  */
1525 static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
1526                                                struct virtnet_info *vi,
1527                                                struct xdp_buff *xdp,
1528                                                unsigned int xdp_frags_truesz)
1529 {
1530         struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
1531         unsigned int headroom, data_len;
1532         struct sk_buff *skb;
1533         int metasize;
1534         u8 nr_frags;
1535
1536         if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
1537                 pr_debug("Error building skb as missing reserved tailroom for xdp");
1538                 return NULL;
1539         }
1540
1541         if (unlikely(xdp_buff_has_frags(xdp)))
1542                 nr_frags = sinfo->nr_frags;
1543
1544         skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
1545         if (unlikely(!skb))
1546                 return NULL;
1547
1548         headroom = xdp->data - xdp->data_hard_start;
1549         data_len = xdp->data_end - xdp->data;
1550         skb_reserve(skb, headroom);
1551         __skb_put(skb, data_len);
1552
1553         metasize = xdp->data - xdp->data_meta;
1554         metasize = metasize > 0 ? metasize : 0;
1555         if (metasize)
1556                 skb_metadata_set(skb, metasize);
1557
1558         if (unlikely(xdp_buff_has_frags(xdp)))
1559                 xdp_update_skb_shared_info(skb, nr_frags,
1560                                            sinfo->xdp_frags_size,
1561                                            xdp_frags_truesz,
1562                                            xdp_buff_is_frag_pfmemalloc(xdp));
1563
1564         return skb;
1565 }
1566
1567 /* TODO: build xdp in big mode */
1568 static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
1569                                       struct virtnet_info *vi,
1570                                       struct receive_queue *rq,
1571                                       struct xdp_buff *xdp,
1572                                       void *buf,
1573                                       unsigned int len,
1574                                       unsigned int frame_sz,
1575                                       int *num_buf,
1576                                       unsigned int *xdp_frags_truesize,
1577                                       struct virtnet_rq_stats *stats)
1578 {
1579         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1580         unsigned int headroom, tailroom, room;
1581         unsigned int truesize, cur_frag_size;
1582         struct skb_shared_info *shinfo;
1583         unsigned int xdp_frags_truesz = 0;
1584         struct page *page;
1585         skb_frag_t *frag;
1586         int offset;
1587         void *ctx;
1588
1589         xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
1590         xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
1591                          VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1592
1593         if (!*num_buf)
1594                 return 0;
1595
1596         if (*num_buf > 1) {
1597                 /* If we want to build multi-buffer xdp, we need
1598                  * to specify that the flags of xdp_buff have the
1599                  * XDP_FLAGS_HAS_FRAG bit.
1600                  */
1601                 if (!xdp_buff_has_frags(xdp))
1602                         xdp_buff_set_frags_flag(xdp);
1603
1604                 shinfo = xdp_get_shared_info_from_buff(xdp);
1605                 shinfo->nr_frags = 0;
1606                 shinfo->xdp_frags_size = 0;
1607         }
1608
1609         if (*num_buf > MAX_SKB_FRAGS + 1)
1610                 return -EINVAL;
1611
1612         while (--*num_buf > 0) {
1613                 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1614                 if (unlikely(!buf)) {
1615                         pr_debug("%s: rx error: %d buffers out of %d missing\n",
1616                                  dev->name, *num_buf,
1617                                  virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1618                         DEV_STATS_INC(dev, rx_length_errors);
1619                         goto err;
1620                 }
1621
1622                 u64_stats_add(&stats->bytes, len);
1623                 page = virt_to_head_page(buf);
1624                 offset = buf - page_address(page);
1625
1626                 truesize = mergeable_ctx_to_truesize(ctx);
1627                 headroom = mergeable_ctx_to_headroom(ctx);
1628                 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1629                 room = SKB_DATA_ALIGN(headroom + tailroom);
1630
1631                 cur_frag_size = truesize;
1632                 xdp_frags_truesz += cur_frag_size;
1633                 if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
1634                         put_page(page);
1635                         pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1636                                  dev->name, len, (unsigned long)(truesize - room));
1637                         DEV_STATS_INC(dev, rx_length_errors);
1638                         goto err;
1639                 }
1640
1641                 frag = &shinfo->frags[shinfo->nr_frags++];
1642                 skb_frag_fill_page_desc(frag, page, offset, len);
1643                 if (page_is_pfmemalloc(page))
1644                         xdp_buff_set_frag_pfmemalloc(xdp);
1645
1646                 shinfo->xdp_frags_size += len;
1647         }
1648
1649         *xdp_frags_truesize = xdp_frags_truesz;
1650         return 0;
1651
1652 err:
1653         put_xdp_frags(xdp);
1654         return -EINVAL;
1655 }
1656
1657 static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1658                                    struct receive_queue *rq,
1659                                    struct bpf_prog *xdp_prog,
1660                                    void *ctx,
1661                                    unsigned int *frame_sz,
1662                                    int *num_buf,
1663                                    struct page **page,
1664                                    int offset,
1665                                    unsigned int *len,
1666                                    struct virtio_net_hdr_mrg_rxbuf *hdr)
1667 {
1668         unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1669         unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1670         struct page *xdp_page;
1671         unsigned int xdp_room;
1672
1673         /* Transient failure which in theory could occur if
1674          * in-flight packets from before XDP was enabled reach
1675          * the receive path after XDP is loaded.
1676          */
1677         if (unlikely(hdr->hdr.gso_type))
1678                 return NULL;
1679
1680         /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
1681          * with headroom may add hole in truesize, which
1682          * make their length exceed PAGE_SIZE. So we disabled the
1683          * hole mechanism for xdp. See add_recvbuf_mergeable().
1684          */
1685         *frame_sz = truesize;
1686
1687         if (likely(headroom >= virtnet_get_headroom(vi) &&
1688                    (*num_buf == 1 || xdp_prog->aux->xdp_has_frags))) {
1689                 return page_address(*page) + offset;
1690         }
1691
1692         /* This happens when headroom is not enough because
1693          * of the buffer was prefilled before XDP is set.
1694          * This should only happen for the first several packets.
1695          * In fact, vq reset can be used here to help us clean up
1696          * the prefilled buffers, but many existing devices do not
1697          * support it, and we don't want to bother users who are
1698          * using xdp normally.
1699          */
1700         if (!xdp_prog->aux->xdp_has_frags) {
1701                 /* linearize data for XDP */
1702                 xdp_page = xdp_linearize_page(rq, num_buf,
1703                                               *page, offset,
1704                                               VIRTIO_XDP_HEADROOM,
1705                                               len);
1706                 if (!xdp_page)
1707                         return NULL;
1708         } else {
1709                 xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
1710                                           sizeof(struct skb_shared_info));
1711                 if (*len + xdp_room > PAGE_SIZE)
1712                         return NULL;
1713
1714                 xdp_page = alloc_page(GFP_ATOMIC);
1715                 if (!xdp_page)
1716                         return NULL;
1717
1718                 memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
1719                        page_address(*page) + offset, *len);
1720         }
1721
1722         *frame_sz = PAGE_SIZE;
1723
1724         put_page(*page);
1725
1726         *page = xdp_page;
1727
1728         return page_address(*page) + VIRTIO_XDP_HEADROOM;
1729 }
1730
1731 static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
1732                                              struct virtnet_info *vi,
1733                                              struct receive_queue *rq,
1734                                              struct bpf_prog *xdp_prog,
1735                                              void *buf,
1736                                              void *ctx,
1737                                              unsigned int len,
1738                                              unsigned int *xdp_xmit,
1739                                              struct virtnet_rq_stats *stats)
1740 {
1741         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1742         int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1743         struct page *page = virt_to_head_page(buf);
1744         int offset = buf - page_address(page);
1745         unsigned int xdp_frags_truesz = 0;
1746         struct sk_buff *head_skb;
1747         unsigned int frame_sz;
1748         struct xdp_buff xdp;
1749         void *data;
1750         u32 act;
1751         int err;
1752
1753         data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1754                                      offset, &len, hdr);
1755         if (unlikely(!data))
1756                 goto err_xdp;
1757
1758         err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1759                                          &num_buf, &xdp_frags_truesz, stats);
1760         if (unlikely(err))
1761                 goto err_xdp;
1762
1763         act = virtnet_xdp_handler(xdp_prog, &xdp, dev, xdp_xmit, stats);
1764
1765         switch (act) {
1766         case XDP_PASS:
1767                 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1768                 if (unlikely(!head_skb))
1769                         break;
1770                 return head_skb;
1771
1772         case XDP_TX:
1773         case XDP_REDIRECT:
1774                 return NULL;
1775
1776         default:
1777                 break;
1778         }
1779
1780         put_xdp_frags(&xdp);
1781
1782 err_xdp:
1783         put_page(page);
1784         mergeable_buf_free(rq, num_buf, dev, stats);
1785
1786         u64_stats_inc(&stats->xdp_drops);
1787         u64_stats_inc(&stats->drops);
1788         return NULL;
1789 }
1790
1791 static struct sk_buff *receive_mergeable(struct net_device *dev,
1792                                          struct virtnet_info *vi,
1793                                          struct receive_queue *rq,
1794                                          void *buf,
1795                                          void *ctx,
1796                                          unsigned int len,
1797                                          unsigned int *xdp_xmit,
1798                                          struct virtnet_rq_stats *stats)
1799 {
1800         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
1801         int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1802         struct page *page = virt_to_head_page(buf);
1803         int offset = buf - page_address(page);
1804         struct sk_buff *head_skb, *curr_skb;
1805         unsigned int truesize = mergeable_ctx_to_truesize(ctx);
1806         unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1807         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1808         unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
1809
1810         head_skb = NULL;
1811         u64_stats_add(&stats->bytes, len - vi->hdr_len);
1812
1813         if (unlikely(len > truesize - room)) {
1814                 pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1815                          dev->name, len, (unsigned long)(truesize - room));
1816                 DEV_STATS_INC(dev, rx_length_errors);
1817                 goto err_skb;
1818         }
1819
1820         if (unlikely(vi->xdp_enabled)) {
1821                 struct bpf_prog *xdp_prog;
1822
1823                 rcu_read_lock();
1824                 xdp_prog = rcu_dereference(rq->xdp_prog);
1825                 if (xdp_prog) {
1826                         head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1827                                                          len, xdp_xmit, stats);
1828                         rcu_read_unlock();
1829                         return head_skb;
1830                 }
1831                 rcu_read_unlock();
1832         }
1833
1834         head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1835         curr_skb = head_skb;
1836
1837         if (unlikely(!curr_skb))
1838                 goto err_skb;
1839         while (--num_buf) {
1840                 int num_skb_frags;
1841
1842                 buf = virtnet_rq_get_buf(rq, &len, &ctx);
1843                 if (unlikely(!buf)) {
1844                         pr_debug("%s: rx error: %d buffers out of %d missing\n",
1845                                  dev->name, num_buf,
1846                                  virtio16_to_cpu(vi->vdev,
1847                                                  hdr->num_buffers));
1848                         DEV_STATS_INC(dev, rx_length_errors);
1849                         goto err_buf;
1850                 }
1851
1852                 u64_stats_add(&stats->bytes, len);
1853                 page = virt_to_head_page(buf);
1854
1855                 truesize = mergeable_ctx_to_truesize(ctx);
1856                 headroom = mergeable_ctx_to_headroom(ctx);
1857                 tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
1858                 room = SKB_DATA_ALIGN(headroom + tailroom);
1859                 if (unlikely(len > truesize - room)) {
1860                         pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
1861                                  dev->name, len, (unsigned long)(truesize - room));
1862                         DEV_STATS_INC(dev, rx_length_errors);
1863                         goto err_skb;
1864                 }
1865
1866                 num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
1867                 if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
1868                         struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
1869
1870                         if (unlikely(!nskb))
1871                                 goto err_skb;
1872                         if (curr_skb == head_skb)
1873                                 skb_shinfo(curr_skb)->frag_list = nskb;
1874                         else
1875                                 curr_skb->next = nskb;
1876                         curr_skb = nskb;
1877                         head_skb->truesize += nskb->truesize;
1878                         num_skb_frags = 0;
1879                 }
1880                 if (curr_skb != head_skb) {
1881                         head_skb->data_len += len;
1882                         head_skb->len += len;
1883                         head_skb->truesize += truesize;
1884                 }
1885                 offset = buf - page_address(page);
1886                 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
1887                         put_page(page);
1888                         skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
1889                                              len, truesize);
1890                 } else {
1891                         skb_add_rx_frag(curr_skb, num_skb_frags, page,
1892                                         offset, len, truesize);
1893                 }
1894         }
1895
1896         ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
1897         return head_skb;
1898
1899 err_skb:
1900         put_page(page);
1901         mergeable_buf_free(rq, num_buf, dev, stats);
1902
1903 err_buf:
1904         u64_stats_inc(&stats->drops);
1905         dev_kfree_skb(head_skb);
1906         return NULL;
1907 }
1908
1909 static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
1910                                 struct sk_buff *skb)
1911 {
1912         enum pkt_hash_types rss_hash_type;
1913
1914         if (!hdr_hash || !skb)
1915                 return;
1916
1917         switch (__le16_to_cpu(hdr_hash->hash_report)) {
1918         case VIRTIO_NET_HASH_REPORT_TCPv4:
1919         case VIRTIO_NET_HASH_REPORT_UDPv4:
1920         case VIRTIO_NET_HASH_REPORT_TCPv6:
1921         case VIRTIO_NET_HASH_REPORT_UDPv6:
1922         case VIRTIO_NET_HASH_REPORT_TCPv6_EX:
1923         case VIRTIO_NET_HASH_REPORT_UDPv6_EX:
1924                 rss_hash_type = PKT_HASH_TYPE_L4;
1925                 break;
1926         case VIRTIO_NET_HASH_REPORT_IPv4:
1927         case VIRTIO_NET_HASH_REPORT_IPv6:
1928         case VIRTIO_NET_HASH_REPORT_IPv6_EX:
1929                 rss_hash_type = PKT_HASH_TYPE_L3;
1930                 break;
1931         case VIRTIO_NET_HASH_REPORT_NONE:
1932         default:
1933                 rss_hash_type = PKT_HASH_TYPE_NONE;
1934         }
1935         skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
1936 }
1937
1938 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1939                         void *buf, unsigned int len, void **ctx,
1940                         unsigned int *xdp_xmit,
1941                         struct virtnet_rq_stats *stats)
1942 {
1943         struct net_device *dev = vi->dev;
1944         struct sk_buff *skb;
1945         struct virtio_net_common_hdr *hdr;
1946
1947         if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1948                 pr_debug("%s: short packet %i\n", dev->name, len);
1949                 DEV_STATS_INC(dev, rx_length_errors);
1950                 virtnet_rq_free_buf(vi, rq, buf);
1951                 return;
1952         }
1953
1954         if (vi->mergeable_rx_bufs)
1955                 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1956                                         stats);
1957         else if (vi->big_packets)
1958                 skb = receive_big(dev, vi, rq, buf, len, stats);
1959         else
1960                 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1961
1962         if (unlikely(!skb))
1963                 return;
1964
1965         hdr = skb_vnet_common_hdr(skb);
1966         if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1967                 virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
1968
1969         if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
1970                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1971
1972         if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
1973                                   virtio_is_little_endian(vi->vdev))) {
1974                 net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
1975                                      dev->name, hdr->hdr.gso_type,
1976                                      hdr->hdr.gso_size);
1977                 goto frame_err;
1978         }
1979
1980         skb_record_rx_queue(skb, vq2rxq(rq->vq));
1981         skb->protocol = eth_type_trans(skb, dev);
1982         pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
1983                  ntohs(skb->protocol), skb->len, skb->pkt_type);
1984
1985         napi_gro_receive(&rq->napi, skb);
1986         return;
1987
1988 frame_err:
1989         DEV_STATS_INC(dev, rx_frame_errors);
1990         dev_kfree_skb(skb);
1991 }
1992
1993 /* Unlike mergeable buffers, all buffers are allocated to the
1994  * same size, except for the headroom. For this reason we do
1995  * not need to use  mergeable_len_to_ctx here - it is enough
1996  * to store the headroom as the context ignoring the truesize.
1997  */
1998 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1999                              gfp_t gfp)
2000 {
2001         char *buf;
2002         unsigned int xdp_headroom = virtnet_get_headroom(vi);
2003         void *ctx = (void *)(unsigned long)xdp_headroom;
2004         int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
2005         int err;
2006
2007         len = SKB_DATA_ALIGN(len) +
2008               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2009
2010         buf = virtnet_rq_alloc(rq, len, gfp);
2011         if (unlikely(!buf))
2012                 return -ENOMEM;
2013
2014         virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
2015                                vi->hdr_len + GOOD_PACKET_LEN);
2016
2017         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
2018         if (err < 0) {
2019                 virtnet_rq_unmap(rq, buf, 0);
2020                 put_page(virt_to_head_page(buf));
2021         }
2022
2023         return err;
2024 }
2025
2026 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
2027                            gfp_t gfp)
2028 {
2029         struct page *first, *list = NULL;
2030         char *p;
2031         int i, err, offset;
2032
2033         sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
2034
2035         /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
2036         for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
2037                 first = get_a_page(rq, gfp);
2038                 if (!first) {
2039                         if (list)
2040                                 give_pages(rq, list);
2041                         return -ENOMEM;
2042                 }
2043                 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
2044
2045                 /* chain new page in list head to match sg */
2046                 first->private = (unsigned long)list;
2047                 list = first;
2048         }
2049
2050         first = get_a_page(rq, gfp);
2051         if (!first) {
2052                 give_pages(rq, list);
2053                 return -ENOMEM;
2054         }
2055         p = page_address(first);
2056
2057         /* rq->sg[0], rq->sg[1] share the same page */
2058         /* a separated rq->sg[0] for header - required in case !any_header_sg */
2059         sg_set_buf(&rq->sg[0], p, vi->hdr_len);
2060
2061         /* rq->sg[1] for data packet, from offset */
2062         offset = sizeof(struct padded_vnet_hdr);
2063         sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
2064
2065         /* chain first in list head */
2066         first->private = (unsigned long)list;
2067         err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
2068                                   first, gfp);
2069         if (err < 0)
2070                 give_pages(rq, first);
2071
2072         return err;
2073 }
2074
2075 static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
2076                                           struct ewma_pkt_len *avg_pkt_len,
2077                                           unsigned int room)
2078 {
2079         struct virtnet_info *vi = rq->vq->vdev->priv;
2080         const size_t hdr_len = vi->hdr_len;
2081         unsigned int len;
2082
2083         if (room)
2084                 return PAGE_SIZE - room;
2085
2086         len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
2087                                 rq->min_buf_len, PAGE_SIZE - hdr_len);
2088
2089         return ALIGN(len, L1_CACHE_BYTES);
2090 }
2091
2092 static int add_recvbuf_mergeable(struct virtnet_info *vi,
2093                                  struct receive_queue *rq, gfp_t gfp)
2094 {
2095         struct page_frag *alloc_frag = &rq->alloc_frag;
2096         unsigned int headroom = virtnet_get_headroom(vi);
2097         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2098         unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
2099         unsigned int len, hole;
2100         void *ctx;
2101         char *buf;
2102         int err;
2103
2104         /* Extra tailroom is needed to satisfy XDP's assumption. This
2105          * means rx frags coalescing won't work, but consider we've
2106          * disabled GSO for XDP, it won't be a big issue.
2107          */
2108         len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
2109
2110         buf = virtnet_rq_alloc(rq, len + room, gfp);
2111         if (unlikely(!buf))
2112                 return -ENOMEM;
2113
2114         buf += headroom; /* advance address leaving hole at front of pkt */
2115         hole = alloc_frag->size - alloc_frag->offset;
2116         if (hole < len + room) {
2117                 /* To avoid internal fragmentation, if there is very likely not
2118                  * enough space for another buffer, add the remaining space to
2119                  * the current buffer.
2120                  * XDP core assumes that frame_size of xdp_buff and the length
2121                  * of the frag are PAGE_SIZE, so we disable the hole mechanism.
2122                  */
2123                 if (!headroom)
2124                         len += hole;
2125                 alloc_frag->offset += hole;
2126         }
2127
2128         virtnet_rq_init_one_sg(rq, buf, len);
2129
2130         ctx = mergeable_len_to_ctx(len + room, headroom);
2131         err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
2132         if (err < 0) {
2133                 virtnet_rq_unmap(rq, buf, 0);
2134                 put_page(virt_to_head_page(buf));
2135         }
2136
2137         return err;
2138 }
2139
2140 /*
2141  * Returns false if we couldn't fill entirely (OOM).
2142  *
2143  * Normally run in the receive path, but can also be run from ndo_open
2144  * before we're receiving packets, or from refill_work which is
2145  * careful to disable receiving (using napi_disable).
2146  */
2147 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
2148                           gfp_t gfp)
2149 {
2150         int err;
2151         bool oom;
2152
2153         do {
2154                 if (vi->mergeable_rx_bufs)
2155                         err = add_recvbuf_mergeable(vi, rq, gfp);
2156                 else if (vi->big_packets)
2157                         err = add_recvbuf_big(vi, rq, gfp);
2158                 else
2159                         err = add_recvbuf_small(vi, rq, gfp);
2160
2161                 oom = err == -ENOMEM;
2162                 if (err)
2163                         break;
2164         } while (rq->vq->num_free);
2165         if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
2166                 unsigned long flags;
2167
2168                 flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
2169                 u64_stats_inc(&rq->stats.kicks);
2170                 u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
2171         }
2172
2173         return !oom;
2174 }
2175
2176 static void skb_recv_done(struct virtqueue *rvq)
2177 {
2178         struct virtnet_info *vi = rvq->vdev->priv;
2179         struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2180
2181         rq->calls++;
2182         virtqueue_napi_schedule(&rq->napi, rvq);
2183 }
2184
2185 static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
2186 {
2187         napi_enable(napi);
2188
2189         /* If all buffers were filled by other side before we napi_enabled, we
2190          * won't get another interrupt, so process any outstanding packets now.
2191          * Call local_bh_enable after to trigger softIRQ processing.
2192          */
2193         local_bh_disable();
2194         virtqueue_napi_schedule(napi, vq);
2195         local_bh_enable();
2196 }
2197
2198 static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2199                                    struct virtqueue *vq,
2200                                    struct napi_struct *napi)
2201 {
2202         if (!napi->weight)
2203                 return;
2204
2205         /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
2206          * enable the feature if this is likely affine with the transmit path.
2207          */
2208         if (!vi->affinity_hint_set) {
2209                 napi->weight = 0;
2210                 return;
2211         }
2212
2213         return virtnet_napi_enable(vq, napi);
2214 }
2215
2216 static void virtnet_napi_tx_disable(struct napi_struct *napi)
2217 {
2218         if (napi->weight)
2219                 napi_disable(napi);
2220 }
2221
2222 static void refill_work(struct work_struct *work)
2223 {
2224         struct virtnet_info *vi =
2225                 container_of(work, struct virtnet_info, refill.work);
2226         bool still_empty;
2227         int i;
2228
2229         for (i = 0; i < vi->curr_queue_pairs; i++) {
2230                 struct receive_queue *rq = &vi->rq[i];
2231
2232                 napi_disable(&rq->napi);
2233                 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2234                 virtnet_napi_enable(rq->vq, &rq->napi);
2235
2236                 /* In theory, this can happen: if we don't get any buffers in
2237                  * we will *never* try to fill again.
2238                  */
2239                 if (still_empty)
2240                         schedule_delayed_work(&vi->refill, HZ/2);
2241         }
2242 }
2243
2244 static int virtnet_receive(struct receive_queue *rq, int budget,
2245                            unsigned int *xdp_xmit)
2246 {
2247         struct virtnet_info *vi = rq->vq->vdev->priv;
2248         struct virtnet_rq_stats stats = {};
2249         unsigned int len;
2250         int packets = 0;
2251         void *buf;
2252         int i;
2253
2254         if (!vi->big_packets || vi->mergeable_rx_bufs) {
2255                 void *ctx;
2256
2257                 while (packets < budget &&
2258                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
2259                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2260                         packets++;
2261                 }
2262         } else {
2263                 while (packets < budget &&
2264                        (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
2265                         receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2266                         packets++;
2267                 }
2268         }
2269
2270         if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
2271                 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2272                         spin_lock(&vi->refill_lock);
2273                         if (vi->refill_enabled)
2274                                 schedule_delayed_work(&vi->refill, 0);
2275                         spin_unlock(&vi->refill_lock);
2276                 }
2277         }
2278
2279         u64_stats_set(&stats.packets, packets);
2280         u64_stats_update_begin(&rq->stats.syncp);
2281         for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
2282                 size_t offset = virtnet_rq_stats_desc[i].offset;
2283                 u64_stats_t *item, *src;
2284
2285                 item = (u64_stats_t *)((u8 *)&rq->stats + offset);
2286                 src = (u64_stats_t *)((u8 *)&stats + offset);
2287                 u64_stats_add(item, u64_stats_read(src));
2288         }
2289
2290         u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
2291         u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
2292
2293         u64_stats_update_end(&rq->stats.syncp);
2294
2295         return packets;
2296 }
2297
2298 static void virtnet_poll_cleantx(struct receive_queue *rq)
2299 {
2300         struct virtnet_info *vi = rq->vq->vdev->priv;
2301         unsigned int index = vq2rxq(rq->vq);
2302         struct send_queue *sq = &vi->sq[index];
2303         struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2304
2305         if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2306                 return;
2307
2308         if (__netif_tx_trylock(txq)) {
2309                 if (sq->reset) {
2310                         __netif_tx_unlock(txq);
2311                         return;
2312                 }
2313
2314                 do {
2315                         virtqueue_disable_cb(sq->vq);
2316                         free_old_xmit(sq, true);
2317                 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2318
2319                 if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
2320                         if (netif_tx_queue_stopped(txq)) {
2321                                 u64_stats_update_begin(&sq->stats.syncp);
2322                                 u64_stats_inc(&sq->stats.wake);
2323                                 u64_stats_update_end(&sq->stats.syncp);
2324                         }
2325                         netif_tx_wake_queue(txq);
2326                 }
2327
2328                 __netif_tx_unlock(txq);
2329         }
2330 }
2331
2332 static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
2333 {
2334         struct dim_sample cur_sample = {};
2335
2336         if (!rq->packets_in_napi)
2337                 return;
2338
2339         u64_stats_update_begin(&rq->stats.syncp);
2340         dim_update_sample(rq->calls,
2341                           u64_stats_read(&rq->stats.packets),
2342                           u64_stats_read(&rq->stats.bytes),
2343                           &cur_sample);
2344         u64_stats_update_end(&rq->stats.syncp);
2345
2346         net_dim(&rq->dim, cur_sample);
2347         rq->packets_in_napi = 0;
2348 }
2349
2350 static int virtnet_poll(struct napi_struct *napi, int budget)
2351 {
2352         struct receive_queue *rq =
2353                 container_of(napi, struct receive_queue, napi);
2354         struct virtnet_info *vi = rq->vq->vdev->priv;
2355         struct send_queue *sq;
2356         unsigned int received;
2357         unsigned int xdp_xmit = 0;
2358         bool napi_complete;
2359
2360         virtnet_poll_cleantx(rq);
2361
2362         received = virtnet_receive(rq, budget, &xdp_xmit);
2363         rq->packets_in_napi += received;
2364
2365         if (xdp_xmit & VIRTIO_XDP_REDIR)
2366                 xdp_do_flush();
2367
2368         /* Out of packets? */
2369         if (received < budget) {
2370                 napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
2371                 /* Intentionally not taking dim_lock here. This may result in a
2372                  * spurious net_dim call. But if that happens virtnet_rx_dim_work
2373                  * will not act on the scheduled work.
2374                  */
2375                 if (napi_complete && rq->dim_enabled)
2376                         virtnet_rx_dim_update(vi, rq);
2377         }
2378
2379         if (xdp_xmit & VIRTIO_XDP_TX) {
2380                 sq = virtnet_xdp_get_sq(vi);
2381                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2382                         u64_stats_update_begin(&sq->stats.syncp);
2383                         u64_stats_inc(&sq->stats.kicks);
2384                         u64_stats_update_end(&sq->stats.syncp);
2385                 }
2386                 virtnet_xdp_put_sq(vi, sq);
2387         }
2388
2389         return received;
2390 }
2391
2392 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
2393 {
2394         virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2395         napi_disable(&vi->rq[qp_index].napi);
2396         xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2397 }
2398
2399 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
2400 {
2401         struct net_device *dev = vi->dev;
2402         int err;
2403
2404         err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2405                                vi->rq[qp_index].napi.napi_id);
2406         if (err < 0)
2407                 return err;
2408
2409         err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2410                                          MEM_TYPE_PAGE_SHARED, NULL);
2411         if (err < 0)
2412                 goto err_xdp_reg_mem_model;
2413
2414         virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2415         virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2416
2417         return 0;
2418
2419 err_xdp_reg_mem_model:
2420         xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2421         return err;
2422 }
2423
2424 static int virtnet_open(struct net_device *dev)
2425 {
2426         struct virtnet_info *vi = netdev_priv(dev);
2427         int i, err;
2428
2429         enable_delayed_refill(vi);
2430
2431         for (i = 0; i < vi->max_queue_pairs; i++) {
2432                 if (i < vi->curr_queue_pairs)
2433                         /* Make sure we have some buffers: if oom use wq. */
2434                         if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2435                                 schedule_delayed_work(&vi->refill, 0);
2436
2437                 err = virtnet_enable_queue_pair(vi, i);
2438                 if (err < 0)
2439                         goto err_enable_qp;
2440         }
2441
2442         return 0;
2443
2444 err_enable_qp:
2445         disable_delayed_refill(vi);
2446         cancel_delayed_work_sync(&vi->refill);
2447
2448         for (i--; i >= 0; i--) {
2449                 virtnet_disable_queue_pair(vi, i);
2450                 cancel_work_sync(&vi->rq[i].dim.work);
2451         }
2452
2453         return err;
2454 }
2455
2456 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
2457 {
2458         struct send_queue *sq = container_of(napi, struct send_queue, napi);
2459         struct virtnet_info *vi = sq->vq->vdev->priv;
2460         unsigned int index = vq2txq(sq->vq);
2461         struct netdev_queue *txq;
2462         int opaque;
2463         bool done;
2464
2465         if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2466                 /* We don't need to enable cb for XDP */
2467                 napi_complete_done(napi, 0);
2468                 return 0;
2469         }
2470
2471         txq = netdev_get_tx_queue(vi->dev, index);
2472         __netif_tx_lock(txq, raw_smp_processor_id());
2473         virtqueue_disable_cb(sq->vq);
2474         free_old_xmit(sq, true);
2475
2476         if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
2477                 if (netif_tx_queue_stopped(txq)) {
2478                         u64_stats_update_begin(&sq->stats.syncp);
2479                         u64_stats_inc(&sq->stats.wake);
2480                         u64_stats_update_end(&sq->stats.syncp);
2481                 }
2482                 netif_tx_wake_queue(txq);
2483         }
2484
2485         opaque = virtqueue_enable_cb_prepare(sq->vq);
2486
2487         done = napi_complete_done(napi, 0);
2488
2489         if (!done)
2490                 virtqueue_disable_cb(sq->vq);
2491
2492         __netif_tx_unlock(txq);
2493
2494         if (done) {
2495                 if (unlikely(virtqueue_poll(sq->vq, opaque))) {
2496                         if (napi_schedule_prep(napi)) {
2497                                 __netif_tx_lock(txq, raw_smp_processor_id());
2498                                 virtqueue_disable_cb(sq->vq);
2499                                 __netif_tx_unlock(txq);
2500                                 __napi_schedule(napi);
2501                         }
2502                 }
2503         }
2504
2505         return 0;
2506 }
2507
2508 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
2509 {
2510         struct virtio_net_hdr_mrg_rxbuf *hdr;
2511         const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
2512         struct virtnet_info *vi = sq->vq->vdev->priv;
2513         int num_sg;
2514         unsigned hdr_len = vi->hdr_len;
2515         bool can_push;
2516
2517         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2518
2519         can_push = vi->any_header_sg &&
2520                 !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
2521                 !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
2522         /* Even if we can, don't push here yet as this would skew
2523          * csum_start offset below. */
2524         if (can_push)
2525                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
2526         else
2527                 hdr = &skb_vnet_common_hdr(skb)->mrg_hdr;
2528
2529         if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
2530                                     virtio_is_little_endian(vi->vdev), false,
2531                                     0))
2532                 return -EPROTO;
2533
2534         if (vi->mergeable_rx_bufs)
2535                 hdr->num_buffers = 0;
2536
2537         sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
2538         if (can_push) {
2539                 __skb_push(skb, hdr_len);
2540                 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
2541                 if (unlikely(num_sg < 0))
2542                         return num_sg;
2543                 /* Pull header back to avoid skew in tx bytes calculations. */
2544                 __skb_pull(skb, hdr_len);
2545         } else {
2546                 sg_set_buf(sq->sg, hdr, hdr_len);
2547                 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
2548                 if (unlikely(num_sg < 0))
2549                         return num_sg;
2550                 num_sg++;
2551         }
2552         return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
2553 }
2554
2555 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
2556 {
2557         struct virtnet_info *vi = netdev_priv(dev);
2558         int qnum = skb_get_queue_mapping(skb);
2559         struct send_queue *sq = &vi->sq[qnum];
2560         int err;
2561         struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
2562         bool kick = !netdev_xmit_more();
2563         bool use_napi = sq->napi.weight;
2564
2565         /* Free up any pending old buffers before queueing new ones. */
2566         do {
2567                 if (use_napi)
2568                         virtqueue_disable_cb(sq->vq);
2569
2570                 free_old_xmit(sq, false);
2571
2572         } while (use_napi && kick &&
2573                unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
2574
2575         /* timestamp packet in software */
2576         skb_tx_timestamp(skb);
2577
2578         /* Try to transmit */
2579         err = xmit_skb(sq, skb);
2580
2581         /* This should not happen! */
2582         if (unlikely(err)) {
2583                 DEV_STATS_INC(dev, tx_fifo_errors);
2584                 if (net_ratelimit())
2585                         dev_warn(&dev->dev,
2586                                  "Unexpected TXQ (%d) queue failure: %d\n",
2587                                  qnum, err);
2588                 DEV_STATS_INC(dev, tx_dropped);
2589                 dev_kfree_skb_any(skb);
2590                 return NETDEV_TX_OK;
2591         }
2592
2593         /* Don't wait up for transmitted skbs to be freed. */
2594         if (!use_napi) {
2595                 skb_orphan(skb);
2596                 nf_reset_ct(skb);
2597         }
2598
2599         check_sq_full_and_disable(vi, dev, sq);
2600
2601         if (kick || netif_xmit_stopped(txq)) {
2602                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
2603                         u64_stats_update_begin(&sq->stats.syncp);
2604                         u64_stats_inc(&sq->stats.kicks);
2605                         u64_stats_update_end(&sq->stats.syncp);
2606                 }
2607         }
2608
2609         return NETDEV_TX_OK;
2610 }
2611
2612 static int virtnet_rx_resize(struct virtnet_info *vi,
2613                              struct receive_queue *rq, u32 ring_num)
2614 {
2615         bool running = netif_running(vi->dev);
2616         int err, qindex;
2617
2618         qindex = rq - vi->rq;
2619
2620         if (running) {
2621                 napi_disable(&rq->napi);
2622                 cancel_work_sync(&rq->dim.work);
2623         }
2624
2625         err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
2626         if (err)
2627                 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2628
2629         if (!try_fill_recv(vi, rq, GFP_KERNEL))
2630                 schedule_delayed_work(&vi->refill, 0);
2631
2632         if (running)
2633                 virtnet_napi_enable(rq->vq, &rq->napi);
2634         return err;
2635 }
2636
2637 static int virtnet_tx_resize(struct virtnet_info *vi,
2638                              struct send_queue *sq, u32 ring_num)
2639 {
2640         bool running = netif_running(vi->dev);
2641         struct netdev_queue *txq;
2642         int err, qindex;
2643
2644         qindex = sq - vi->sq;
2645
2646         if (running)
2647                 virtnet_napi_tx_disable(&sq->napi);
2648
2649         txq = netdev_get_tx_queue(vi->dev, qindex);
2650
2651         /* 1. wait all ximt complete
2652          * 2. fix the race of netif_stop_subqueue() vs netif_start_subqueue()
2653          */
2654         __netif_tx_lock_bh(txq);
2655
2656         /* Prevent rx poll from accessing sq. */
2657         sq->reset = true;
2658
2659         /* Prevent the upper layer from trying to send packets. */
2660         netif_stop_subqueue(vi->dev, qindex);
2661
2662         __netif_tx_unlock_bh(txq);
2663
2664         err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
2665         if (err)
2666                 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2667
2668         __netif_tx_lock_bh(txq);
2669         sq->reset = false;
2670         netif_tx_wake_queue(txq);
2671         __netif_tx_unlock_bh(txq);
2672
2673         if (running)
2674                 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2675         return err;
2676 }
2677
2678 /*
2679  * Send command via the control virtqueue and check status.  Commands
2680  * supported by the hypervisor, as indicated by feature bits, should
2681  * never fail unless improperly formatted.
2682  */
2683 static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd,
2684                                        struct scatterlist *out,
2685                                        struct scatterlist *in)
2686 {
2687         struct scatterlist *sgs[5], hdr, stat;
2688         u32 out_num = 0, tmp, in_num = 0;
2689         int ret;
2690
2691         /* Caller should know better */
2692         BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2693
2694         mutex_lock(&vi->cvq_lock);
2695         vi->ctrl->status = ~0;
2696         vi->ctrl->hdr.class = class;
2697         vi->ctrl->hdr.cmd = cmd;
2698         /* Add header */
2699         sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2700         sgs[out_num++] = &hdr;
2701
2702         if (out)
2703                 sgs[out_num++] = out;
2704
2705         /* Add return status. */
2706         sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2707         sgs[out_num + in_num++] = &stat;
2708
2709         if (in)
2710                 sgs[out_num + in_num++] = in;
2711
2712         BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
2713         ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
2714         if (ret < 0) {
2715                 dev_warn(&vi->vdev->dev,
2716                          "Failed to add sgs for command vq: %d\n.", ret);
2717                 mutex_unlock(&vi->cvq_lock);
2718                 return false;
2719         }
2720
2721         if (unlikely(!virtqueue_kick(vi->cvq)))
2722                 goto unlock;
2723
2724         /* Spin for a response, the kick causes an ioport write, trapping
2725          * into the hypervisor, so the request should be handled immediately.
2726          */
2727         while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2728                !virtqueue_is_broken(vi->cvq)) {
2729                 cond_resched();
2730                 cpu_relax();
2731         }
2732
2733 unlock:
2734         mutex_unlock(&vi->cvq_lock);
2735         return vi->ctrl->status == VIRTIO_NET_OK;
2736 }
2737
2738 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2739                                  struct scatterlist *out)
2740 {
2741         return virtnet_send_command_reply(vi, class, cmd, out, NULL);
2742 }
2743
2744 static int virtnet_set_mac_address(struct net_device *dev, void *p)
2745 {
2746         struct virtnet_info *vi = netdev_priv(dev);
2747         struct virtio_device *vdev = vi->vdev;
2748         int ret;
2749         struct sockaddr *addr;
2750         struct scatterlist sg;
2751
2752         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2753                 return -EOPNOTSUPP;
2754
2755         addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
2756         if (!addr)
2757                 return -ENOMEM;
2758
2759         ret = eth_prepare_mac_addr_change(dev, addr);
2760         if (ret)
2761                 goto out;
2762
2763         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
2764                 sg_init_one(&sg, addr->sa_data, dev->addr_len);
2765                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2766                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
2767                         dev_warn(&vdev->dev,
2768                                  "Failed to set mac address by vq command.\n");
2769                         ret = -EINVAL;
2770                         goto out;
2771                 }
2772         } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
2773                    !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2774                 unsigned int i;
2775
2776                 /* Naturally, this has an atomicity problem. */
2777                 for (i = 0; i < dev->addr_len; i++)
2778                         virtio_cwrite8(vdev,
2779                                        offsetof(struct virtio_net_config, mac) +
2780                                        i, addr->sa_data[i]);
2781         }
2782
2783         eth_commit_mac_addr_change(dev, p);
2784         ret = 0;
2785
2786 out:
2787         kfree(addr);
2788         return ret;
2789 }
2790
2791 static void virtnet_stats(struct net_device *dev,
2792                           struct rtnl_link_stats64 *tot)
2793 {
2794         struct virtnet_info *vi = netdev_priv(dev);
2795         unsigned int start;
2796         int i;
2797
2798         for (i = 0; i < vi->max_queue_pairs; i++) {
2799                 u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
2800                 struct receive_queue *rq = &vi->rq[i];
2801                 struct send_queue *sq = &vi->sq[i];
2802
2803                 do {
2804                         start = u64_stats_fetch_begin(&sq->stats.syncp);
2805                         tpackets = u64_stats_read(&sq->stats.packets);
2806                         tbytes   = u64_stats_read(&sq->stats.bytes);
2807                         terrors  = u64_stats_read(&sq->stats.tx_timeouts);
2808                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
2809
2810                 do {
2811                         start = u64_stats_fetch_begin(&rq->stats.syncp);
2812                         rpackets = u64_stats_read(&rq->stats.packets);
2813                         rbytes   = u64_stats_read(&rq->stats.bytes);
2814                         rdrops   = u64_stats_read(&rq->stats.drops);
2815                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
2816
2817                 tot->rx_packets += rpackets;
2818                 tot->tx_packets += tpackets;
2819                 tot->rx_bytes   += rbytes;
2820                 tot->tx_bytes   += tbytes;
2821                 tot->rx_dropped += rdrops;
2822                 tot->tx_errors  += terrors;
2823         }
2824
2825         tot->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
2826         tot->tx_fifo_errors = DEV_STATS_READ(dev, tx_fifo_errors);
2827         tot->rx_length_errors = DEV_STATS_READ(dev, rx_length_errors);
2828         tot->rx_frame_errors = DEV_STATS_READ(dev, rx_frame_errors);
2829 }
2830
2831 static void virtnet_ack_link_announce(struct virtnet_info *vi)
2832 {
2833         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2834                                   VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
2835                 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2836 }
2837
2838 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2839 {
2840         struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
2841         struct scatterlist sg;
2842         struct net_device *dev = vi->dev;
2843
2844         if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2845                 return 0;
2846
2847         mq = kzalloc(sizeof(*mq), GFP_KERNEL);
2848         if (!mq)
2849                 return -ENOMEM;
2850
2851         mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2852         sg_init_one(&sg, mq, sizeof(*mq));
2853
2854         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2855                                   VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
2856                 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
2857                          queue_pairs);
2858                 return -EINVAL;
2859         } else {
2860                 vi->curr_queue_pairs = queue_pairs;
2861                 /* virtnet_open() will refill when device is going to up. */
2862                 if (dev->flags & IFF_UP)
2863                         schedule_delayed_work(&vi->refill, 0);
2864         }
2865
2866         return 0;
2867 }
2868
2869 static int virtnet_close(struct net_device *dev)
2870 {
2871         struct virtnet_info *vi = netdev_priv(dev);
2872         int i;
2873
2874         /* Make sure NAPI doesn't schedule refill work */
2875         disable_delayed_refill(vi);
2876         /* Make sure refill_work doesn't re-enable napi! */
2877         cancel_delayed_work_sync(&vi->refill);
2878
2879         for (i = 0; i < vi->max_queue_pairs; i++) {
2880                 virtnet_disable_queue_pair(vi, i);
2881                 cancel_work_sync(&vi->rq[i].dim.work);
2882         }
2883
2884         return 0;
2885 }
2886
2887 static void virtnet_rx_mode_work(struct work_struct *work)
2888 {
2889         struct virtnet_info *vi =
2890                 container_of(work, struct virtnet_info, rx_mode_work);
2891         u8 *promisc_allmulti  __free(kfree) = NULL;
2892         struct net_device *dev = vi->dev;
2893         struct scatterlist sg[2];
2894         struct virtio_net_ctrl_mac *mac_data;
2895         struct netdev_hw_addr *ha;
2896         int uc_count;
2897         int mc_count;
2898         void *buf;
2899         int i;
2900
2901         /* We can't dynamically set ndo_set_rx_mode, so return gracefully */
2902         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2903                 return;
2904
2905         rtnl_lock();
2906
2907         promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_ATOMIC);
2908         if (!promisc_allmulti) {
2909                 dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
2910                 return;
2911         }
2912
2913         *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
2914         sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
2915
2916         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2917                                   VIRTIO_NET_CTRL_RX_PROMISC, sg))
2918                 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
2919                          *promisc_allmulti ? "en" : "dis");
2920
2921         *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
2922         sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
2923
2924         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2925                                   VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
2926                 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
2927                          *promisc_allmulti ? "en" : "dis");
2928
2929         netif_addr_lock_bh(dev);
2930
2931         uc_count = netdev_uc_count(dev);
2932         mc_count = netdev_mc_count(dev);
2933         /* MAC filter - use one buffer for both lists */
2934         buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
2935                       (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
2936         mac_data = buf;
2937         if (!buf) {
2938                 netif_addr_unlock_bh(dev);
2939                 rtnl_unlock();
2940                 return;
2941         }
2942
2943         sg_init_table(sg, 2);
2944
2945         /* Store the unicast list and count in the front of the buffer */
2946         mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2947         i = 0;
2948         netdev_for_each_uc_addr(ha, dev)
2949                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2950
2951         sg_set_buf(&sg[0], mac_data,
2952                    sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
2953
2954         /* multicast list and count fill the end */
2955         mac_data = (void *)&mac_data->macs[uc_count][0];
2956
2957         mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2958         i = 0;
2959         netdev_for_each_mc_addr(ha, dev)
2960                 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
2961
2962         netif_addr_unlock_bh(dev);
2963
2964         sg_set_buf(&sg[1], mac_data,
2965                    sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
2966
2967         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2968                                   VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
2969                 dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
2970
2971         rtnl_unlock();
2972
2973         kfree(buf);
2974 }
2975
2976 static void virtnet_set_rx_mode(struct net_device *dev)
2977 {
2978         struct virtnet_info *vi = netdev_priv(dev);
2979
2980         if (vi->rx_mode_work_enabled)
2981                 schedule_work(&vi->rx_mode_work);
2982 }
2983
2984 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
2985                                    __be16 proto, u16 vid)
2986 {
2987         struct virtnet_info *vi = netdev_priv(dev);
2988         __virtio16 *_vid __free(kfree) = NULL;
2989         struct scatterlist sg;
2990
2991         _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
2992         if (!_vid)
2993                 return -ENOMEM;
2994
2995         *_vid = cpu_to_virtio16(vi->vdev, vid);
2996         sg_init_one(&sg, _vid, sizeof(*_vid));
2997
2998         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2999                                   VIRTIO_NET_CTRL_VLAN_ADD, &sg))
3000                 dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
3001         return 0;
3002 }
3003
3004 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
3005                                     __be16 proto, u16 vid)
3006 {
3007         struct virtnet_info *vi = netdev_priv(dev);
3008         __virtio16 *_vid __free(kfree) = NULL;
3009         struct scatterlist sg;
3010
3011         _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
3012         if (!_vid)
3013                 return -ENOMEM;
3014
3015         *_vid = cpu_to_virtio16(vi->vdev, vid);
3016         sg_init_one(&sg, _vid, sizeof(*_vid));
3017
3018         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
3019                                   VIRTIO_NET_CTRL_VLAN_DEL, &sg))
3020                 dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
3021         return 0;
3022 }
3023
3024 static void virtnet_clean_affinity(struct virtnet_info *vi)
3025 {
3026         int i;
3027
3028         if (vi->affinity_hint_set) {
3029                 for (i = 0; i < vi->max_queue_pairs; i++) {
3030                         virtqueue_set_affinity(vi->rq[i].vq, NULL);
3031                         virtqueue_set_affinity(vi->sq[i].vq, NULL);
3032                 }
3033
3034                 vi->affinity_hint_set = false;
3035         }
3036 }
3037
3038 static void virtnet_set_affinity(struct virtnet_info *vi)
3039 {
3040         cpumask_var_t mask;
3041         int stragglers;
3042         int group_size;
3043         int i, j, cpu;
3044         int num_cpu;
3045         int stride;
3046
3047         if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3048                 virtnet_clean_affinity(vi);
3049                 return;
3050         }
3051
3052         num_cpu = num_online_cpus();
3053         stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
3054         stragglers = num_cpu >= vi->curr_queue_pairs ?
3055                         num_cpu % vi->curr_queue_pairs :
3056                         0;
3057         cpu = cpumask_first(cpu_online_mask);
3058
3059         for (i = 0; i < vi->curr_queue_pairs; i++) {
3060                 group_size = stride + (i < stragglers ? 1 : 0);
3061
3062                 for (j = 0; j < group_size; j++) {
3063                         cpumask_set_cpu(cpu, mask);
3064                         cpu = cpumask_next_wrap(cpu, cpu_online_mask,
3065                                                 nr_cpu_ids, false);
3066                 }
3067                 virtqueue_set_affinity(vi->rq[i].vq, mask);
3068                 virtqueue_set_affinity(vi->sq[i].vq, mask);
3069                 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
3070                 cpumask_clear(mask);
3071         }
3072
3073         vi->affinity_hint_set = true;
3074         free_cpumask_var(mask);
3075 }
3076
3077 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
3078 {
3079         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3080                                                    node);
3081         virtnet_set_affinity(vi);
3082         return 0;
3083 }
3084
3085 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
3086 {
3087         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3088                                                    node_dead);
3089         virtnet_set_affinity(vi);
3090         return 0;
3091 }
3092
3093 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
3094 {
3095         struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
3096                                                    node);
3097
3098         virtnet_clean_affinity(vi);
3099         return 0;
3100 }
3101
3102 static enum cpuhp_state virtionet_online;
3103
3104 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
3105 {
3106         int ret;
3107
3108         ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
3109         if (ret)
3110                 return ret;
3111         ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
3112                                                &vi->node_dead);
3113         if (!ret)
3114                 return ret;
3115         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
3116         return ret;
3117 }
3118
3119 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
3120 {
3121         cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
3122         cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
3123                                             &vi->node_dead);
3124 }
3125
3126 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3127                                          u16 vqn, u32 max_usecs, u32 max_packets)
3128 {
3129         struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
3130         struct scatterlist sgs;
3131
3132         coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
3133         if (!coal_vq)
3134                 return -ENOMEM;
3135
3136         coal_vq->vqn = cpu_to_le16(vqn);
3137         coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
3138         coal_vq->coal.max_packets = cpu_to_le32(max_packets);
3139         sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
3140
3141         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3142                                   VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
3143                                   &sgs))
3144                 return -EINVAL;
3145
3146         return 0;
3147 }
3148
3149 static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3150                                             u16 queue, u32 max_usecs,
3151                                             u32 max_packets)
3152 {
3153         int err;
3154
3155         err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
3156                                             max_usecs, max_packets);
3157         if (err)
3158                 return err;
3159
3160         vi->rq[queue].intr_coal.max_usecs = max_usecs;
3161         vi->rq[queue].intr_coal.max_packets = max_packets;
3162
3163         return 0;
3164 }
3165
3166 static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3167                                             u16 queue, u32 max_usecs,
3168                                             u32 max_packets)
3169 {
3170         int err;
3171
3172         err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
3173                                             max_usecs, max_packets);
3174         if (err)
3175                 return err;
3176
3177         vi->sq[queue].intr_coal.max_usecs = max_usecs;
3178         vi->sq[queue].intr_coal.max_packets = max_packets;
3179
3180         return 0;
3181 }
3182
3183 static void virtnet_get_ringparam(struct net_device *dev,
3184                                   struct ethtool_ringparam *ring,
3185                                   struct kernel_ethtool_ringparam *kernel_ring,
3186                                   struct netlink_ext_ack *extack)
3187 {
3188         struct virtnet_info *vi = netdev_priv(dev);
3189
3190         ring->rx_max_pending = vi->rq[0].vq->num_max;
3191         ring->tx_max_pending = vi->sq[0].vq->num_max;
3192         ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
3193         ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
3194 }
3195
3196 static int virtnet_set_ringparam(struct net_device *dev,
3197                                  struct ethtool_ringparam *ring,
3198                                  struct kernel_ethtool_ringparam *kernel_ring,
3199                                  struct netlink_ext_ack *extack)
3200 {
3201         struct virtnet_info *vi = netdev_priv(dev);
3202         u32 rx_pending, tx_pending;
3203         struct receive_queue *rq;
3204         struct send_queue *sq;
3205         int i, err;
3206
3207         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
3208                 return -EINVAL;
3209
3210         rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
3211         tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
3212
3213         if (ring->rx_pending == rx_pending &&
3214             ring->tx_pending == tx_pending)
3215                 return 0;
3216
3217         if (ring->rx_pending > vi->rq[0].vq->num_max)
3218                 return -EINVAL;
3219
3220         if (ring->tx_pending > vi->sq[0].vq->num_max)
3221                 return -EINVAL;
3222
3223         for (i = 0; i < vi->max_queue_pairs; i++) {
3224                 rq = vi->rq + i;
3225                 sq = vi->sq + i;
3226
3227                 if (ring->tx_pending != tx_pending) {
3228                         err = virtnet_tx_resize(vi, sq, ring->tx_pending);
3229                         if (err)
3230                                 return err;
3231
3232                         /* Upon disabling and re-enabling a transmit virtqueue, the device must
3233                          * set the coalescing parameters of the virtqueue to those configured
3234                          * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver
3235                          * did not set any TX coalescing parameters, to 0.
3236                          */
3237                         err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
3238                                                                vi->intr_coal_tx.max_usecs,
3239                                                                vi->intr_coal_tx.max_packets);
3240                         if (err)
3241                                 return err;
3242                 }
3243
3244                 if (ring->rx_pending != rx_pending) {
3245                         err = virtnet_rx_resize(vi, rq, ring->rx_pending);
3246                         if (err)
3247                                 return err;
3248
3249                         /* The reason is same as the transmit virtqueue reset */
3250                         mutex_lock(&vi->rq[i].dim_lock);
3251                         err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
3252                                                                vi->intr_coal_rx.max_usecs,
3253                                                                vi->intr_coal_rx.max_packets);
3254                         mutex_unlock(&vi->rq[i].dim_lock);
3255                         if (err)
3256                                 return err;
3257                 }
3258         }
3259
3260         return 0;
3261 }
3262
3263 static bool virtnet_commit_rss_command(struct virtnet_info *vi)
3264 {
3265         struct net_device *dev = vi->dev;
3266         struct scatterlist sgs[4];
3267         unsigned int sg_buf_size;
3268
3269         /* prepare sgs */
3270         sg_init_table(sgs, 4);
3271
3272         sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
3273         sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
3274
3275         sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
3276         sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
3277
3278         sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
3279                         - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
3280         sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
3281
3282         sg_buf_size = vi->rss_key_size;
3283         sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
3284
3285         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
3286                                   vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
3287                                   : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
3288                 goto err;
3289
3290         return true;
3291
3292 err:
3293         dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
3294         return false;
3295
3296 }
3297
3298 static void virtnet_init_default_rss(struct virtnet_info *vi)
3299 {
3300         u32 indir_val = 0;
3301         int i = 0;
3302
3303         vi->rss.hash_types = vi->rss_hash_types_supported;
3304         vi->rss_hash_types_saved = vi->rss_hash_types_supported;
3305         vi->rss.indirection_table_mask = vi->rss_indir_table_size
3306                                                 ? vi->rss_indir_table_size - 1 : 0;
3307         vi->rss.unclassified_queue = 0;
3308
3309         for (; i < vi->rss_indir_table_size; ++i) {
3310                 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
3311                 vi->rss.indirection_table[i] = indir_val;
3312         }
3313
3314         vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
3315         vi->rss.hash_key_length = vi->rss_key_size;
3316
3317         netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
3318 }
3319
3320 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3321 {
3322         info->data = 0;
3323         switch (info->flow_type) {
3324         case TCP_V4_FLOW:
3325                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3326                         info->data = RXH_IP_SRC | RXH_IP_DST |
3327                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
3328                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3329                         info->data = RXH_IP_SRC | RXH_IP_DST;
3330                 }
3331                 break;
3332         case TCP_V6_FLOW:
3333                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3334                         info->data = RXH_IP_SRC | RXH_IP_DST |
3335                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
3336                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3337                         info->data = RXH_IP_SRC | RXH_IP_DST;
3338                 }
3339                 break;
3340         case UDP_V4_FLOW:
3341                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3342                         info->data = RXH_IP_SRC | RXH_IP_DST |
3343                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
3344                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3345                         info->data = RXH_IP_SRC | RXH_IP_DST;
3346                 }
3347                 break;
3348         case UDP_V6_FLOW:
3349                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3350                         info->data = RXH_IP_SRC | RXH_IP_DST |
3351                                                  RXH_L4_B_0_1 | RXH_L4_B_2_3;
3352                 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3353                         info->data = RXH_IP_SRC | RXH_IP_DST;
3354                 }
3355                 break;
3356         case IPV4_FLOW:
3357                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3358                         info->data = RXH_IP_SRC | RXH_IP_DST;
3359
3360                 break;
3361         case IPV6_FLOW:
3362                 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3363                         info->data = RXH_IP_SRC | RXH_IP_DST;
3364
3365                 break;
3366         default:
3367                 info->data = 0;
3368                 break;
3369         }
3370 }
3371
3372 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3373 {
3374         u32 new_hashtypes = vi->rss_hash_types_saved;
3375         bool is_disable = info->data & RXH_DISCARD;
3376         bool is_l4 = info->data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3);
3377
3378         /* supports only 'sd', 'sdfn' and 'r' */
3379         if (!((info->data == (RXH_IP_SRC | RXH_IP_DST)) | is_l4 | is_disable))
3380                 return false;
3381
3382         switch (info->flow_type) {
3383         case TCP_V4_FLOW:
3384                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_TCPv4);
3385                 if (!is_disable)
3386                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3387                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv4 : 0);
3388                 break;
3389         case UDP_V4_FLOW:
3390                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv4 | VIRTIO_NET_RSS_HASH_TYPE_UDPv4);
3391                 if (!is_disable)
3392                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv4
3393                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv4 : 0);
3394                 break;
3395         case IPV4_FLOW:
3396                 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3397                 if (!is_disable)
3398                         new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv4;
3399                 break;
3400         case TCP_V6_FLOW:
3401                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_TCPv6);
3402                 if (!is_disable)
3403                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3404                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_TCPv6 : 0);
3405                 break;
3406         case UDP_V6_FLOW:
3407                 new_hashtypes &= ~(VIRTIO_NET_RSS_HASH_TYPE_IPv6 | VIRTIO_NET_RSS_HASH_TYPE_UDPv6);
3408                 if (!is_disable)
3409                         new_hashtypes |= VIRTIO_NET_RSS_HASH_TYPE_IPv6
3410                                 | (is_l4 ? VIRTIO_NET_RSS_HASH_TYPE_UDPv6 : 0);
3411                 break;
3412         case IPV6_FLOW:
3413                 new_hashtypes &= ~VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3414                 if (!is_disable)
3415                         new_hashtypes = VIRTIO_NET_RSS_HASH_TYPE_IPv6;
3416                 break;
3417         default:
3418                 /* unsupported flow */
3419                 return false;
3420         }
3421
3422         /* if unsupported hashtype was set */
3423         if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3424                 return false;
3425
3426         if (new_hashtypes != vi->rss_hash_types_saved) {
3427                 vi->rss_hash_types_saved = new_hashtypes;
3428                 vi->rss.hash_types = vi->rss_hash_types_saved;
3429                 if (vi->dev->features & NETIF_F_RXHASH)
3430                         return virtnet_commit_rss_command(vi);
3431         }
3432
3433         return true;
3434 }
3435
3436 static void virtnet_get_drvinfo(struct net_device *dev,
3437                                 struct ethtool_drvinfo *info)
3438 {
3439         struct virtnet_info *vi = netdev_priv(dev);
3440         struct virtio_device *vdev = vi->vdev;
3441
3442         strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
3443         strscpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
3444         strscpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
3445
3446 }
3447
3448 /* TODO: Eliminate OOO packets during switching */
3449 static int virtnet_set_channels(struct net_device *dev,
3450                                 struct ethtool_channels *channels)
3451 {
3452         struct virtnet_info *vi = netdev_priv(dev);
3453         u16 queue_pairs = channels->combined_count;
3454         int err;
3455
3456         /* We don't support separate rx/tx channels.
3457          * We don't allow setting 'other' channels.
3458          */
3459         if (channels->rx_count || channels->tx_count || channels->other_count)
3460                 return -EINVAL;
3461
3462         if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3463                 return -EINVAL;
3464
3465         /* For now we don't support modifying channels while XDP is loaded
3466          * also when XDP is loaded all RX queues have XDP programs so we only
3467          * need to check a single RX queue.
3468          */
3469         if (vi->rq[0].xdp_prog)
3470                 return -EINVAL;
3471
3472         cpus_read_lock();
3473         err = virtnet_set_queues(vi, queue_pairs);
3474         if (err) {
3475                 cpus_read_unlock();
3476                 goto err;
3477         }
3478         virtnet_set_affinity(vi);
3479         cpus_read_unlock();
3480
3481         netif_set_real_num_tx_queues(dev, queue_pairs);
3482         netif_set_real_num_rx_queues(dev, queue_pairs);
3483  err:
3484         return err;
3485 }
3486
3487 static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt,
3488                                   int num, int qid, const struct virtnet_stat_desc *desc)
3489 {
3490         int i;
3491
3492         if (qid < 0) {
3493                 for (i = 0; i < num; ++i)
3494                         ethtool_sprintf(p, noq_fmt, desc[i].desc);
3495         } else {
3496                 for (i = 0; i < num; ++i)
3497                         ethtool_sprintf(p, fmt, qid, desc[i].desc);
3498         }
3499 }
3500
3501 /* qid == -1: for rx/tx queue total field */
3502 static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
3503 {
3504         const struct virtnet_stat_desc *desc;
3505         const char *fmt, *noq_fmt;
3506         u8 *p = *data;
3507         u32 num;
3508
3509         if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) {
3510                 noq_fmt = "cq_hw_%s";
3511
3512                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
3513                         desc = &virtnet_stats_cvq_desc[0];
3514                         num = ARRAY_SIZE(virtnet_stats_cvq_desc);
3515
3516                         virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc);
3517                 }
3518         }
3519
3520         if (type == VIRTNET_Q_TYPE_RX) {
3521                 fmt = "rx%u_%s";
3522                 noq_fmt = "rx_%s";
3523
3524                 desc = &virtnet_rq_stats_desc[0];
3525                 num = ARRAY_SIZE(virtnet_rq_stats_desc);
3526
3527                 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
3528
3529                 fmt = "rx%u_hw_%s";
3530                 noq_fmt = "rx_hw_%s";
3531
3532                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
3533                         desc = &virtnet_stats_rx_basic_desc[0];
3534                         num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
3535
3536                         virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
3537                 }
3538
3539                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
3540                         desc = &virtnet_stats_rx_csum_desc[0];
3541                         num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
3542
3543                         virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
3544                 }
3545
3546                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
3547                         desc = &virtnet_stats_rx_speed_desc[0];
3548                         num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
3549
3550                         virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
3551                 }
3552         }
3553
3554         if (type == VIRTNET_Q_TYPE_TX) {
3555                 fmt = "tx%u_%s";
3556                 noq_fmt = "tx_%s";
3557
3558                 desc = &virtnet_sq_stats_desc[0];
3559                 num = ARRAY_SIZE(virtnet_sq_stats_desc);
3560
3561                 virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
3562
3563                 fmt = "tx%u_hw_%s";
3564                 noq_fmt = "tx_hw_%s";
3565
3566                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
3567                         desc = &virtnet_stats_tx_basic_desc[0];
3568                         num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
3569
3570                         virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
3571                 }
3572
3573                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
3574                         desc = &virtnet_stats_tx_gso_desc[0];
3575                         num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
3576
3577                         virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
3578                 }
3579
3580                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
3581                         desc = &virtnet_stats_tx_speed_desc[0];
3582                         num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
3583
3584                         virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
3585                 }
3586         }
3587
3588         *data = p;
3589 }
3590
3591 struct virtnet_stats_ctx {
3592         /* The stats are write to qstats or ethtool -S */
3593         bool to_qstat;
3594
3595         /* Used to calculate the offset inside the output buffer. */
3596         u32 desc_num[3];
3597
3598         /* The actual supported stat types. */
3599         u32 bitmap[3];
3600
3601         /* Used to calculate the reply buffer size. */
3602         u32 size[3];
3603
3604         /* Record the output buffer. */
3605         u64 *data;
3606 };
3607
3608 static void virtnet_stats_ctx_init(struct virtnet_info *vi,
3609                                    struct virtnet_stats_ctx *ctx,
3610                                    u64 *data, bool to_qstat)
3611 {
3612         u32 queue_type;
3613
3614         ctx->data = data;
3615         ctx->to_qstat = to_qstat;
3616
3617         if (to_qstat) {
3618                 ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
3619                 ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
3620
3621                 queue_type = VIRTNET_Q_TYPE_RX;
3622
3623                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
3624                         ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
3625                         ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
3626                         ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_basic);
3627                 }
3628
3629                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
3630                         ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
3631                         ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
3632                         ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_csum);
3633                 }
3634
3635                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
3636                         ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_GSO;
3637                         ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
3638                         ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_gso);
3639                 }
3640
3641                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
3642                         ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
3643                         ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
3644                         ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_speed);
3645                 }
3646
3647                 queue_type = VIRTNET_Q_TYPE_TX;
3648
3649                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
3650                         ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
3651                         ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
3652                         ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_basic);
3653                 }
3654
3655                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
3656                         ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_CSUM;
3657                         ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
3658                         ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_csum);
3659                 }
3660
3661                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
3662                         ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_GSO;
3663                         ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
3664                         ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_gso);
3665                 }
3666
3667                 if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
3668                         ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
3669                         ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
3670                         ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_speed);
3671                 }
3672
3673                 return;
3674         }
3675
3676         ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc);
3677         ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc);
3678
3679         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
3680                 queue_type = VIRTNET_Q_TYPE_CQ;
3681
3682                 ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_CVQ;
3683                 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc);
3684                 ctx->size[queue_type]     += sizeof(struct virtio_net_stats_cvq);
3685         }
3686
3687         queue_type = VIRTNET_Q_TYPE_RX;
3688
3689         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
3690                 ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
3691                 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc);
3692                 ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_basic);
3693         }
3694
3695         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
3696                 ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
3697                 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc);
3698                 ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_csum);
3699         }
3700
3701         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
3702                 ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
3703                 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc);
3704                 ctx->size[queue_type]     += sizeof(struct virtio_net_stats_rx_speed);
3705         }
3706
3707         queue_type = VIRTNET_Q_TYPE_TX;
3708
3709         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
3710                 ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
3711                 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc);
3712                 ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_basic);
3713         }
3714
3715         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
3716                 ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_GSO;
3717                 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc);
3718                 ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_gso);
3719         }
3720
3721         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
3722                 ctx->bitmap[queue_type]   |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
3723                 ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc);
3724                 ctx->size[queue_type]     += sizeof(struct virtio_net_stats_tx_speed);
3725         }
3726 }
3727
3728 /* stats_sum_queue - Calculate the sum of the same fields in sq or rq.
3729  * @sum: the position to store the sum values
3730  * @num: field num
3731  * @q_value: the first queue fields
3732  * @q_num: number of the queues
3733  */
3734 static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
3735 {
3736         u32 step = num;
3737         int i, j;
3738         u64 *p;
3739
3740         for (i = 0; i < num; ++i) {
3741                 p = sum + i;
3742                 *p = 0;
3743
3744                 for (j = 0; j < q_num; ++j)
3745                         *p += *(q_value + i + j * step);
3746         }
3747 }
3748
3749 static void virtnet_fill_total_fields(struct virtnet_info *vi,
3750                                       struct virtnet_stats_ctx *ctx)
3751 {
3752         u64 *data, *first_rx_q, *first_tx_q;
3753         u32 num_cq, num_rx, num_tx;
3754
3755         num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
3756         num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
3757         num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
3758
3759         first_rx_q = ctx->data + num_rx + num_tx + num_cq;
3760         first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx;
3761
3762         data = ctx->data;
3763
3764         stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs);
3765
3766         data = ctx->data + num_rx;
3767
3768         stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
3769 }
3770
3771 static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
3772                                      struct virtnet_stats_ctx *ctx,
3773                                      const u8 *base, bool drv_stats, u8 reply_type)
3774 {
3775         const struct virtnet_stat_desc *desc;
3776         const u64_stats_t *v_stat;
3777         u64 offset, bitmap;
3778         const __le64 *v;
3779         u32 queue_type;
3780         int i, num;
3781
3782         queue_type = vq_type(vi, qid);
3783         bitmap = ctx->bitmap[queue_type];
3784
3785         if (drv_stats) {
3786                 if (queue_type == VIRTNET_Q_TYPE_RX) {
3787                         desc = &virtnet_rq_stats_desc_qstat[0];
3788                         num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
3789                 } else {
3790                         desc = &virtnet_sq_stats_desc_qstat[0];
3791                         num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
3792                 }
3793
3794                 for (i = 0; i < num; ++i) {
3795                         offset = desc[i].qstat_offset / sizeof(*ctx->data);
3796                         v_stat = (const u64_stats_t *)(base + desc[i].offset);
3797                         ctx->data[offset] = u64_stats_read(v_stat);
3798                 }
3799                 return;
3800         }
3801
3802         if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
3803                 desc = &virtnet_stats_rx_basic_desc_qstat[0];
3804                 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
3805                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
3806                         goto found;
3807         }
3808
3809         if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
3810                 desc = &virtnet_stats_rx_csum_desc_qstat[0];
3811                 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
3812                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
3813                         goto found;
3814         }
3815
3816         if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
3817                 desc = &virtnet_stats_rx_gso_desc_qstat[0];
3818                 num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
3819                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO)
3820                         goto found;
3821         }
3822
3823         if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
3824                 desc = &virtnet_stats_rx_speed_desc_qstat[0];
3825                 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
3826                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
3827                         goto found;
3828         }
3829
3830         if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
3831                 desc = &virtnet_stats_tx_basic_desc_qstat[0];
3832                 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
3833                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
3834                         goto found;
3835         }
3836
3837         if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
3838                 desc = &virtnet_stats_tx_csum_desc_qstat[0];
3839                 num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
3840                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM)
3841                         goto found;
3842         }
3843
3844         if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
3845                 desc = &virtnet_stats_tx_gso_desc_qstat[0];
3846                 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
3847                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
3848                         goto found;
3849         }
3850
3851         if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
3852                 desc = &virtnet_stats_tx_speed_desc_qstat[0];
3853                 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
3854                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
3855                         goto found;
3856         }
3857
3858         return;
3859
3860 found:
3861         for (i = 0; i < num; ++i) {
3862                 offset = desc[i].qstat_offset / sizeof(*ctx->data);
3863                 v = (const __le64 *)(base + desc[i].offset);
3864                 ctx->data[offset] = le64_to_cpu(*v);
3865         }
3866 }
3867
3868 /* virtnet_fill_stats - copy the stats to qstats or ethtool -S
3869  * The stats source is the device or the driver.
3870  *
3871  * @vi: virtio net info
3872  * @qid: the vq id
3873  * @ctx: stats ctx (initiated by virtnet_stats_ctx_init())
3874  * @base: pointer to the device reply or the driver stats structure.
3875  * @drv_stats: designate the base type (device reply, driver stats)
3876  * @type: the type of the device reply (if drv_stats is true, this must be zero)
3877  */
3878 static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
3879                                struct virtnet_stats_ctx *ctx,
3880                                const u8 *base, bool drv_stats, u8 reply_type)
3881 {
3882         u32 queue_type, num_rx, num_tx, num_cq;
3883         const struct virtnet_stat_desc *desc;
3884         const u64_stats_t *v_stat;
3885         u64 offset, bitmap;
3886         const __le64 *v;
3887         int i, num;
3888
3889         if (ctx->to_qstat)
3890                 return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type);
3891
3892         num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
3893         num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
3894         num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
3895
3896         queue_type = vq_type(vi, qid);
3897         bitmap = ctx->bitmap[queue_type];
3898
3899         /* skip the total fields of pairs */
3900         offset = num_rx + num_tx;
3901
3902         if (queue_type == VIRTNET_Q_TYPE_TX) {
3903                 offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
3904
3905                 num = ARRAY_SIZE(virtnet_sq_stats_desc);
3906                 if (drv_stats) {
3907                         desc = &virtnet_sq_stats_desc[0];
3908                         goto drv_stats;
3909                 }
3910
3911                 offset += num;
3912
3913         } else if (queue_type == VIRTNET_Q_TYPE_RX) {
3914                 offset += num_cq + num_rx * (qid / 2);
3915
3916                 num = ARRAY_SIZE(virtnet_rq_stats_desc);
3917                 if (drv_stats) {
3918                         desc = &virtnet_rq_stats_desc[0];
3919                         goto drv_stats;
3920                 }
3921
3922                 offset += num;
3923         }
3924
3925         if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) {
3926                 desc = &virtnet_stats_cvq_desc[0];
3927                 num = ARRAY_SIZE(virtnet_stats_cvq_desc);
3928                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ)
3929                         goto found;
3930
3931                 offset += num;
3932         }
3933
3934         if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
3935                 desc = &virtnet_stats_rx_basic_desc[0];
3936                 num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
3937                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
3938                         goto found;
3939
3940                 offset += num;
3941         }
3942
3943         if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
3944                 desc = &virtnet_stats_rx_csum_desc[0];
3945                 num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
3946                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
3947                         goto found;
3948
3949                 offset += num;
3950         }
3951
3952         if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
3953                 desc = &virtnet_stats_rx_speed_desc[0];
3954                 num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
3955                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
3956                         goto found;
3957
3958                 offset += num;
3959         }
3960
3961         if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
3962                 desc = &virtnet_stats_tx_basic_desc[0];
3963                 num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
3964                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
3965                         goto found;
3966
3967                 offset += num;
3968         }
3969
3970         if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
3971                 desc = &virtnet_stats_tx_gso_desc[0];
3972                 num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
3973                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
3974                         goto found;
3975
3976                 offset += num;
3977         }
3978
3979         if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
3980                 desc = &virtnet_stats_tx_speed_desc[0];
3981                 num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
3982                 if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
3983                         goto found;
3984
3985                 offset += num;
3986         }
3987
3988         return;
3989
3990 found:
3991         for (i = 0; i < num; ++i) {
3992                 v = (const __le64 *)(base + desc[i].offset);
3993                 ctx->data[offset + i] = le64_to_cpu(*v);
3994         }
3995
3996         return;
3997
3998 drv_stats:
3999         for (i = 0; i < num; ++i) {
4000                 v_stat = (const u64_stats_t *)(base + desc[i].offset);
4001                 ctx->data[offset + i] = u64_stats_read(v_stat);
4002         }
4003 }
4004
4005 static int __virtnet_get_hw_stats(struct virtnet_info *vi,
4006                                   struct virtnet_stats_ctx *ctx,
4007                                   struct virtio_net_ctrl_queue_stats *req,
4008                                   int req_size, void *reply, int res_size)
4009 {
4010         struct virtio_net_stats_reply_hdr *hdr;
4011         struct scatterlist sgs_in, sgs_out;
4012         void *p;
4013         u32 qid;
4014         int ok;
4015
4016         sg_init_one(&sgs_out, req, req_size);
4017         sg_init_one(&sgs_in, reply, res_size);
4018
4019         ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
4020                                         VIRTIO_NET_CTRL_STATS_GET,
4021                                         &sgs_out, &sgs_in);
4022
4023         if (!ok)
4024                 return ok;
4025
4026         for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
4027                 hdr = p;
4028                 qid = le16_to_cpu(hdr->vq_index);
4029                 virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
4030         }
4031
4032         return 0;
4033 }
4034
4035 static void virtnet_make_stat_req(struct virtnet_info *vi,
4036                                   struct virtnet_stats_ctx *ctx,
4037                                   struct virtio_net_ctrl_queue_stats *req,
4038                                   int qid, int *idx)
4039 {
4040         int qtype = vq_type(vi, qid);
4041         u64 bitmap = ctx->bitmap[qtype];
4042
4043         if (!bitmap)
4044                 return;
4045
4046         req->stats[*idx].vq_index = cpu_to_le16(qid);
4047         req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
4048         *idx += 1;
4049 }
4050
4051 /* qid: -1: get stats of all vq.
4052  *     > 0: get the stats for the special vq. This must not be cvq.
4053  */
4054 static int virtnet_get_hw_stats(struct virtnet_info *vi,
4055                                 struct virtnet_stats_ctx *ctx, int qid)
4056 {
4057         int qnum, i, j, res_size, qtype, last_vq, first_vq;
4058         struct virtio_net_ctrl_queue_stats *req;
4059         bool enable_cvq;
4060         void *reply;
4061         int ok;
4062
4063         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
4064                 return 0;
4065
4066         if (qid == -1) {
4067                 last_vq = vi->curr_queue_pairs * 2 - 1;
4068                 first_vq = 0;
4069                 enable_cvq = true;
4070         } else {
4071                 last_vq = qid;
4072                 first_vq = qid;
4073                 enable_cvq = false;
4074         }
4075
4076         qnum = 0;
4077         res_size = 0;
4078         for (i = first_vq; i <= last_vq ; ++i) {
4079                 qtype = vq_type(vi, i);
4080                 if (ctx->bitmap[qtype]) {
4081                         ++qnum;
4082                         res_size += ctx->size[qtype];
4083                 }
4084         }
4085
4086         if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
4087                 res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
4088                 qnum += 1;
4089         }
4090
4091         req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
4092         if (!req)
4093                 return -ENOMEM;
4094
4095         reply = kmalloc(res_size, GFP_KERNEL);
4096         if (!reply) {
4097                 kfree(req);
4098                 return -ENOMEM;
4099         }
4100
4101         j = 0;
4102         for (i = first_vq; i <= last_vq ; ++i)
4103                 virtnet_make_stat_req(vi, ctx, req, i, &j);
4104
4105         if (enable_cvq)
4106                 virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
4107
4108         ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
4109
4110         kfree(req);
4111         kfree(reply);
4112
4113         return ok;
4114 }
4115
4116 static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4117 {
4118         struct virtnet_info *vi = netdev_priv(dev);
4119         unsigned int i;
4120         u8 *p = data;
4121
4122         switch (stringset) {
4123         case ETH_SS_STATS:
4124                 /* Generate the total field names. */
4125                 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p);
4126                 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p);
4127
4128                 virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
4129
4130                 for (i = 0; i < vi->curr_queue_pairs; ++i)
4131                         virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
4132
4133                 for (i = 0; i < vi->curr_queue_pairs; ++i)
4134                         virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
4135                 break;
4136         }
4137 }
4138
4139 static int virtnet_get_sset_count(struct net_device *dev, int sset)
4140 {
4141         struct virtnet_info *vi = netdev_priv(dev);
4142         struct virtnet_stats_ctx ctx = {0};
4143         u32 pair_count;
4144
4145         switch (sset) {
4146         case ETH_SS_STATS:
4147                 virtnet_stats_ctx_init(vi, &ctx, NULL, false);
4148
4149                 pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
4150
4151                 return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] +
4152                         vi->curr_queue_pairs * pair_count;
4153         default:
4154                 return -EOPNOTSUPP;
4155         }
4156 }
4157
4158 static void virtnet_get_ethtool_stats(struct net_device *dev,
4159                                       struct ethtool_stats *stats, u64 *data)
4160 {
4161         struct virtnet_info *vi = netdev_priv(dev);
4162         struct virtnet_stats_ctx ctx = {0};
4163         unsigned int start, i;
4164         const u8 *stats_base;
4165
4166         virtnet_stats_ctx_init(vi, &ctx, data, false);
4167         if (virtnet_get_hw_stats(vi, &ctx, -1))
4168                 dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
4169
4170         for (i = 0; i < vi->curr_queue_pairs; i++) {
4171                 struct receive_queue *rq = &vi->rq[i];
4172                 struct send_queue *sq = &vi->sq[i];
4173
4174                 stats_base = (const u8 *)&rq->stats;
4175                 do {
4176                         start = u64_stats_fetch_begin(&rq->stats.syncp);
4177                         virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
4178                 } while (u64_stats_fetch_retry(&rq->stats.syncp, start));
4179
4180                 stats_base = (const u8 *)&sq->stats;
4181                 do {
4182                         start = u64_stats_fetch_begin(&sq->stats.syncp);
4183                         virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
4184                 } while (u64_stats_fetch_retry(&sq->stats.syncp, start));
4185         }
4186
4187         virtnet_fill_total_fields(vi, &ctx);
4188 }
4189
4190 static void virtnet_get_channels(struct net_device *dev,
4191                                  struct ethtool_channels *channels)
4192 {
4193         struct virtnet_info *vi = netdev_priv(dev);
4194
4195         channels->combined_count = vi->curr_queue_pairs;
4196         channels->max_combined = vi->max_queue_pairs;
4197         channels->max_other = 0;
4198         channels->rx_count = 0;
4199         channels->tx_count = 0;
4200         channels->other_count = 0;
4201 }
4202
4203 static int virtnet_set_link_ksettings(struct net_device *dev,
4204                                       const struct ethtool_link_ksettings *cmd)
4205 {
4206         struct virtnet_info *vi = netdev_priv(dev);
4207
4208         return ethtool_virtdev_set_link_ksettings(dev, cmd,
4209                                                   &vi->speed, &vi->duplex);
4210 }
4211
4212 static int virtnet_get_link_ksettings(struct net_device *dev,
4213                                       struct ethtool_link_ksettings *cmd)
4214 {
4215         struct virtnet_info *vi = netdev_priv(dev);
4216
4217         cmd->base.speed = vi->speed;
4218         cmd->base.duplex = vi->duplex;
4219         cmd->base.port = PORT_OTHER;
4220
4221         return 0;
4222 }
4223
4224 static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
4225                                           struct ethtool_coalesce *ec)
4226 {
4227         struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
4228         struct scatterlist sgs_tx;
4229         int i;
4230
4231         coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
4232         if (!coal_tx)
4233                 return -ENOMEM;
4234
4235         coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
4236         coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
4237         sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
4238
4239         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
4240                                   VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
4241                                   &sgs_tx))
4242                 return -EINVAL;
4243
4244         vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
4245         vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
4246         for (i = 0; i < vi->max_queue_pairs; i++) {
4247                 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
4248                 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
4249         }
4250
4251         return 0;
4252 }
4253
4254 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
4255                                           struct ethtool_coalesce *ec)
4256 {
4257         struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
4258         bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
4259         struct scatterlist sgs_rx;
4260         int ret = 0;
4261         int i;
4262
4263         if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
4264                 return -EOPNOTSUPP;
4265
4266         if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
4267                                ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
4268                 return -EINVAL;
4269
4270         /* Acquire all queues dim_locks */
4271         for (i = 0; i < vi->max_queue_pairs; i++)
4272                 mutex_lock(&vi->rq[i].dim_lock);
4273
4274         if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
4275                 vi->rx_dim_enabled = true;
4276                 for (i = 0; i < vi->max_queue_pairs; i++)
4277                         vi->rq[i].dim_enabled = true;
4278                 goto unlock;
4279         }
4280
4281         coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
4282         if (!coal_rx) {
4283                 ret = -ENOMEM;
4284                 goto unlock;
4285         }
4286
4287         if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
4288                 vi->rx_dim_enabled = false;
4289                 for (i = 0; i < vi->max_queue_pairs; i++)
4290                         vi->rq[i].dim_enabled = false;
4291         }
4292
4293         /* Since the per-queue coalescing params can be set,
4294          * we need apply the global new params even if they
4295          * are not updated.
4296          */
4297         coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
4298         coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
4299         sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
4300
4301         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
4302                                   VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
4303                                   &sgs_rx)) {
4304                 ret = -EINVAL;
4305                 goto unlock;
4306         }
4307
4308         vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
4309         vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
4310         for (i = 0; i < vi->max_queue_pairs; i++) {
4311                 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
4312                 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
4313         }
4314 unlock:
4315         for (i = vi->max_queue_pairs - 1; i >= 0; i--)
4316                 mutex_unlock(&vi->rq[i].dim_lock);
4317
4318         return ret;
4319 }
4320
4321 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
4322                                        struct ethtool_coalesce *ec)
4323 {
4324         int err;
4325
4326         err = virtnet_send_tx_notf_coal_cmds(vi, ec);
4327         if (err)
4328                 return err;
4329
4330         err = virtnet_send_rx_notf_coal_cmds(vi, ec);
4331         if (err)
4332                 return err;
4333
4334         return 0;
4335 }
4336
4337 static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
4338                                              struct ethtool_coalesce *ec,
4339                                              u16 queue)
4340 {
4341         bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
4342         u32 max_usecs, max_packets;
4343         bool cur_rx_dim;
4344         int err;
4345
4346         mutex_lock(&vi->rq[queue].dim_lock);
4347         cur_rx_dim = vi->rq[queue].dim_enabled;
4348         max_usecs = vi->rq[queue].intr_coal.max_usecs;
4349         max_packets = vi->rq[queue].intr_coal.max_packets;
4350
4351         if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
4352                                ec->rx_max_coalesced_frames != max_packets)) {
4353                 mutex_unlock(&vi->rq[queue].dim_lock);
4354                 return -EINVAL;
4355         }
4356
4357         if (rx_ctrl_dim_on && !cur_rx_dim) {
4358                 vi->rq[queue].dim_enabled = true;
4359                 mutex_unlock(&vi->rq[queue].dim_lock);
4360                 return 0;
4361         }
4362
4363         if (!rx_ctrl_dim_on && cur_rx_dim)
4364                 vi->rq[queue].dim_enabled = false;
4365
4366         /* If no params are updated, userspace ethtool will
4367          * reject the modification.
4368          */
4369         err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
4370                                                ec->rx_coalesce_usecs,
4371                                                ec->rx_max_coalesced_frames);
4372         mutex_unlock(&vi->rq[queue].dim_lock);
4373         return err;
4374 }
4375
4376 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
4377                                           struct ethtool_coalesce *ec,
4378                                           u16 queue)
4379 {
4380         int err;
4381
4382         err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
4383         if (err)
4384                 return err;
4385
4386         err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
4387                                                ec->tx_coalesce_usecs,
4388                                                ec->tx_max_coalesced_frames);
4389         if (err)
4390                 return err;
4391
4392         return 0;
4393 }
4394
4395 static void virtnet_rx_dim_work(struct work_struct *work)
4396 {
4397         struct dim *dim = container_of(work, struct dim, work);
4398         struct receive_queue *rq = container_of(dim,
4399                         struct receive_queue, dim);
4400         struct virtnet_info *vi = rq->vq->vdev->priv;
4401         struct net_device *dev = vi->dev;
4402         struct dim_cq_moder update_moder;
4403         int qnum, err;
4404
4405         qnum = rq - vi->rq;
4406
4407         mutex_lock(&rq->dim_lock);
4408         if (!rq->dim_enabled)
4409                 goto out;
4410
4411         update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
4412         if (update_moder.usec != rq->intr_coal.max_usecs ||
4413             update_moder.pkts != rq->intr_coal.max_packets) {
4414                 err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
4415                                                        update_moder.usec,
4416                                                        update_moder.pkts);
4417                 if (err)
4418                         pr_debug("%s: Failed to send dim parameters on rxq%d\n",
4419                                  dev->name, qnum);
4420                 dim->state = DIM_START_MEASURE;
4421         }
4422 out:
4423         mutex_unlock(&rq->dim_lock);
4424 }
4425
4426 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
4427 {
4428         /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
4429          * or VIRTIO_NET_F_VQ_NOTF_COAL feature is negotiated.
4430          */
4431         if (ec->rx_coalesce_usecs || ec->tx_coalesce_usecs)
4432                 return -EOPNOTSUPP;
4433
4434         if (ec->tx_max_coalesced_frames > 1 ||
4435             ec->rx_max_coalesced_frames != 1)
4436                 return -EINVAL;
4437
4438         return 0;
4439 }
4440
4441 static int virtnet_should_update_vq_weight(int dev_flags, int weight,
4442                                            int vq_weight, bool *should_update)
4443 {
4444         if (weight ^ vq_weight) {
4445                 if (dev_flags & IFF_UP)
4446                         return -EBUSY;
4447                 *should_update = true;
4448         }
4449
4450         return 0;
4451 }
4452
4453 static int virtnet_set_coalesce(struct net_device *dev,
4454                                 struct ethtool_coalesce *ec,
4455                                 struct kernel_ethtool_coalesce *kernel_coal,
4456                                 struct netlink_ext_ack *extack)
4457 {
4458         struct virtnet_info *vi = netdev_priv(dev);
4459         int ret, queue_number, napi_weight;
4460         bool update_napi = false;
4461
4462         /* Can't change NAPI weight if the link is up */
4463         napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
4464         for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
4465                 ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
4466                                                       vi->sq[queue_number].napi.weight,
4467                                                       &update_napi);
4468                 if (ret)
4469                         return ret;
4470
4471                 if (update_napi) {
4472                         /* All queues that belong to [queue_number, vi->max_queue_pairs] will be
4473                          * updated for the sake of simplicity, which might not be necessary
4474                          */
4475                         break;
4476                 }
4477         }
4478
4479         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
4480                 ret = virtnet_send_notf_coal_cmds(vi, ec);
4481         else
4482                 ret = virtnet_coal_params_supported(ec);
4483
4484         if (ret)
4485                 return ret;
4486
4487         if (update_napi) {
4488                 for (; queue_number < vi->max_queue_pairs; queue_number++)
4489                         vi->sq[queue_number].napi.weight = napi_weight;
4490         }
4491
4492         return ret;
4493 }
4494
4495 static int virtnet_get_coalesce(struct net_device *dev,
4496                                 struct ethtool_coalesce *ec,
4497                                 struct kernel_ethtool_coalesce *kernel_coal,
4498                                 struct netlink_ext_ack *extack)
4499 {
4500         struct virtnet_info *vi = netdev_priv(dev);
4501
4502         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4503                 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
4504                 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
4505                 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
4506                 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
4507                 ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
4508         } else {
4509                 ec->rx_max_coalesced_frames = 1;
4510
4511                 if (vi->sq[0].napi.weight)
4512                         ec->tx_max_coalesced_frames = 1;
4513         }
4514
4515         return 0;
4516 }
4517
4518 static int virtnet_set_per_queue_coalesce(struct net_device *dev,
4519                                           u32 queue,
4520                                           struct ethtool_coalesce *ec)
4521 {
4522         struct virtnet_info *vi = netdev_priv(dev);
4523         int ret, napi_weight;
4524         bool update_napi = false;
4525
4526         if (queue >= vi->max_queue_pairs)
4527                 return -EINVAL;
4528
4529         /* Can't change NAPI weight if the link is up */
4530         napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
4531         ret = virtnet_should_update_vq_weight(dev->flags, napi_weight,
4532                                               vi->sq[queue].napi.weight,
4533                                               &update_napi);
4534         if (ret)
4535                 return ret;
4536
4537         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
4538                 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
4539         else
4540                 ret = virtnet_coal_params_supported(ec);
4541
4542         if (ret)
4543                 return ret;
4544
4545         if (update_napi)
4546                 vi->sq[queue].napi.weight = napi_weight;
4547
4548         return 0;
4549 }
4550
4551 static int virtnet_get_per_queue_coalesce(struct net_device *dev,
4552                                           u32 queue,
4553                                           struct ethtool_coalesce *ec)
4554 {
4555         struct virtnet_info *vi = netdev_priv(dev);
4556
4557         if (queue >= vi->max_queue_pairs)
4558                 return -EINVAL;
4559
4560         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
4561                 mutex_lock(&vi->rq[queue].dim_lock);
4562                 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
4563                 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
4564                 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
4565                 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
4566                 ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
4567                 mutex_unlock(&vi->rq[queue].dim_lock);
4568         } else {
4569                 ec->rx_max_coalesced_frames = 1;
4570
4571                 if (vi->sq[queue].napi.weight)
4572                         ec->tx_max_coalesced_frames = 1;
4573         }
4574
4575         return 0;
4576 }
4577
4578 static void virtnet_init_settings(struct net_device *dev)
4579 {
4580         struct virtnet_info *vi = netdev_priv(dev);
4581
4582         vi->speed = SPEED_UNKNOWN;
4583         vi->duplex = DUPLEX_UNKNOWN;
4584 }
4585
4586 static void virtnet_update_settings(struct virtnet_info *vi)
4587 {
4588         u32 speed;
4589         u8 duplex;
4590
4591         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
4592                 return;
4593
4594         virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
4595
4596         if (ethtool_validate_speed(speed))
4597                 vi->speed = speed;
4598
4599         virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
4600
4601         if (ethtool_validate_duplex(duplex))
4602                 vi->duplex = duplex;
4603 }
4604
4605 static u32 virtnet_get_rxfh_key_size(struct net_device *dev)
4606 {
4607         return ((struct virtnet_info *)netdev_priv(dev))->rss_key_size;
4608 }
4609
4610 static u32 virtnet_get_rxfh_indir_size(struct net_device *dev)
4611 {
4612         return ((struct virtnet_info *)netdev_priv(dev))->rss_indir_table_size;
4613 }
4614
4615 static int virtnet_get_rxfh(struct net_device *dev,
4616                             struct ethtool_rxfh_param *rxfh)
4617 {
4618         struct virtnet_info *vi = netdev_priv(dev);
4619         int i;
4620
4621         if (rxfh->indir) {
4622                 for (i = 0; i < vi->rss_indir_table_size; ++i)
4623                         rxfh->indir[i] = vi->rss.indirection_table[i];
4624         }
4625
4626         if (rxfh->key)
4627                 memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
4628
4629         rxfh->hfunc = ETH_RSS_HASH_TOP;
4630
4631         return 0;
4632 }
4633
4634 static int virtnet_set_rxfh(struct net_device *dev,
4635                             struct ethtool_rxfh_param *rxfh,
4636                             struct netlink_ext_ack *extack)
4637 {
4638         struct virtnet_info *vi = netdev_priv(dev);
4639         bool update = false;
4640         int i;
4641
4642         if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
4643             rxfh->hfunc != ETH_RSS_HASH_TOP)
4644                 return -EOPNOTSUPP;
4645
4646         if (rxfh->indir) {
4647                 if (!vi->has_rss)
4648                         return -EOPNOTSUPP;
4649
4650                 for (i = 0; i < vi->rss_indir_table_size; ++i)
4651                         vi->rss.indirection_table[i] = rxfh->indir[i];
4652                 update = true;
4653         }
4654
4655         if (rxfh->key) {
4656                 /* If either _F_HASH_REPORT or _F_RSS are negotiated, the
4657                  * device provides hash calculation capabilities, that is,
4658                  * hash_key is configured.
4659                  */
4660                 if (!vi->has_rss && !vi->has_rss_hash_report)
4661                         return -EOPNOTSUPP;
4662
4663                 memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
4664                 update = true;
4665         }
4666
4667         if (update)
4668                 virtnet_commit_rss_command(vi);
4669
4670         return 0;
4671 }
4672
4673 static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
4674 {
4675         struct virtnet_info *vi = netdev_priv(dev);
4676         int rc = 0;
4677
4678         switch (info->cmd) {
4679         case ETHTOOL_GRXRINGS:
4680                 info->data = vi->curr_queue_pairs;
4681                 break;
4682         case ETHTOOL_GRXFH:
4683                 virtnet_get_hashflow(vi, info);
4684                 break;
4685         default:
4686                 rc = -EOPNOTSUPP;
4687         }
4688
4689         return rc;
4690 }
4691
4692 static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
4693 {
4694         struct virtnet_info *vi = netdev_priv(dev);
4695         int rc = 0;
4696
4697         switch (info->cmd) {
4698         case ETHTOOL_SRXFH:
4699                 if (!virtnet_set_hashflow(vi, info))
4700                         rc = -EINVAL;
4701
4702                 break;
4703         default:
4704                 rc = -EOPNOTSUPP;
4705         }
4706
4707         return rc;
4708 }
4709
4710 static const struct ethtool_ops virtnet_ethtool_ops = {
4711         .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
4712                 ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
4713         .get_drvinfo = virtnet_get_drvinfo,
4714         .get_link = ethtool_op_get_link,
4715         .get_ringparam = virtnet_get_ringparam,
4716         .set_ringparam = virtnet_set_ringparam,
4717         .get_strings = virtnet_get_strings,
4718         .get_sset_count = virtnet_get_sset_count,
4719         .get_ethtool_stats = virtnet_get_ethtool_stats,
4720         .set_channels = virtnet_set_channels,
4721         .get_channels = virtnet_get_channels,
4722         .get_ts_info = ethtool_op_get_ts_info,
4723         .get_link_ksettings = virtnet_get_link_ksettings,
4724         .set_link_ksettings = virtnet_set_link_ksettings,
4725         .set_coalesce = virtnet_set_coalesce,
4726         .get_coalesce = virtnet_get_coalesce,
4727         .set_per_queue_coalesce = virtnet_set_per_queue_coalesce,
4728         .get_per_queue_coalesce = virtnet_get_per_queue_coalesce,
4729         .get_rxfh_key_size = virtnet_get_rxfh_key_size,
4730         .get_rxfh_indir_size = virtnet_get_rxfh_indir_size,
4731         .get_rxfh = virtnet_get_rxfh,
4732         .set_rxfh = virtnet_set_rxfh,
4733         .get_rxnfc = virtnet_get_rxnfc,
4734         .set_rxnfc = virtnet_set_rxnfc,
4735 };
4736
4737 static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
4738                                        struct netdev_queue_stats_rx *stats)
4739 {
4740         struct virtnet_info *vi = netdev_priv(dev);
4741         struct receive_queue *rq = &vi->rq[i];
4742         struct virtnet_stats_ctx ctx = {0};
4743
4744         virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
4745
4746         virtnet_get_hw_stats(vi, &ctx, i * 2);
4747         virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
4748 }
4749
4750 static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
4751                                        struct netdev_queue_stats_tx *stats)
4752 {
4753         struct virtnet_info *vi = netdev_priv(dev);
4754         struct send_queue *sq = &vi->sq[i];
4755         struct virtnet_stats_ctx ctx = {0};
4756
4757         virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
4758
4759         virtnet_get_hw_stats(vi, &ctx, i * 2 + 1);
4760         virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
4761 }
4762
4763 static void virtnet_get_base_stats(struct net_device *dev,
4764                                    struct netdev_queue_stats_rx *rx,
4765                                    struct netdev_queue_stats_tx *tx)
4766 {
4767         struct virtnet_info *vi = netdev_priv(dev);
4768
4769         /* The queue stats of the virtio-net will not be reset. So here we
4770          * return 0.
4771          */
4772         rx->bytes = 0;
4773         rx->packets = 0;
4774
4775         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
4776                 rx->hw_drops = 0;
4777                 rx->hw_drop_overruns = 0;
4778         }
4779
4780         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
4781                 rx->csum_unnecessary = 0;
4782                 rx->csum_none = 0;
4783                 rx->csum_bad = 0;
4784         }
4785
4786         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
4787                 rx->hw_gro_packets = 0;
4788                 rx->hw_gro_bytes = 0;
4789                 rx->hw_gro_wire_packets = 0;
4790                 rx->hw_gro_wire_bytes = 0;
4791         }
4792
4793         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED)
4794                 rx->hw_drop_ratelimits = 0;
4795
4796         tx->bytes = 0;
4797         tx->packets = 0;
4798         tx->stop = 0;
4799         tx->wake = 0;
4800
4801         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
4802                 tx->hw_drops = 0;
4803                 tx->hw_drop_errors = 0;
4804         }
4805
4806         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
4807                 tx->csum_none = 0;
4808                 tx->needs_csum = 0;
4809         }
4810
4811         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
4812                 tx->hw_gso_packets = 0;
4813                 tx->hw_gso_bytes = 0;
4814                 tx->hw_gso_wire_packets = 0;
4815                 tx->hw_gso_wire_bytes = 0;
4816         }
4817
4818         if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
4819                 tx->hw_drop_ratelimits = 0;
4820 }
4821
4822 static const struct netdev_stat_ops virtnet_stat_ops = {
4823         .get_queue_stats_rx     = virtnet_get_queue_stats_rx,
4824         .get_queue_stats_tx     = virtnet_get_queue_stats_tx,
4825         .get_base_stats         = virtnet_get_base_stats,
4826 };
4827
4828 static void virtnet_freeze_down(struct virtio_device *vdev)
4829 {
4830         struct virtnet_info *vi = vdev->priv;
4831
4832         /* Make sure no work handler is accessing the device */
4833         flush_work(&vi->config_work);
4834         disable_rx_mode_work(vi);
4835         flush_work(&vi->rx_mode_work);
4836
4837         netif_tx_lock_bh(vi->dev);
4838         netif_device_detach(vi->dev);
4839         netif_tx_unlock_bh(vi->dev);
4840         if (netif_running(vi->dev))
4841                 virtnet_close(vi->dev);
4842 }
4843
4844 static int init_vqs(struct virtnet_info *vi);
4845
4846 static int virtnet_restore_up(struct virtio_device *vdev)
4847 {
4848         struct virtnet_info *vi = vdev->priv;
4849         int err;
4850
4851         err = init_vqs(vi);
4852         if (err)
4853                 return err;
4854
4855         virtio_device_ready(vdev);
4856
4857         enable_delayed_refill(vi);
4858         enable_rx_mode_work(vi);
4859
4860         if (netif_running(vi->dev)) {
4861                 err = virtnet_open(vi->dev);
4862                 if (err)
4863                         return err;
4864         }
4865
4866         netif_tx_lock_bh(vi->dev);
4867         netif_device_attach(vi->dev);
4868         netif_tx_unlock_bh(vi->dev);
4869         return err;
4870 }
4871
4872 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
4873 {
4874         __virtio64 *_offloads __free(kfree) = NULL;
4875         struct scatterlist sg;
4876
4877         _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
4878         if (!_offloads)
4879                 return -ENOMEM;
4880
4881         *_offloads = cpu_to_virtio64(vi->vdev, offloads);
4882
4883         sg_init_one(&sg, _offloads, sizeof(*_offloads));
4884
4885         if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
4886                                   VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
4887                 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
4888                 return -EINVAL;
4889         }
4890
4891         return 0;
4892 }
4893
4894 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
4895 {
4896         u64 offloads = 0;
4897
4898         if (!vi->guest_offloads)
4899                 return 0;
4900
4901         return virtnet_set_guest_offloads(vi, offloads);
4902 }
4903
4904 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
4905 {
4906         u64 offloads = vi->guest_offloads;
4907
4908         if (!vi->guest_offloads)
4909                 return 0;
4910
4911         return virtnet_set_guest_offloads(vi, offloads);
4912 }
4913
4914 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
4915                            struct netlink_ext_ack *extack)
4916 {
4917         unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
4918                                            sizeof(struct skb_shared_info));
4919         unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
4920         struct virtnet_info *vi = netdev_priv(dev);
4921         struct bpf_prog *old_prog;
4922         u16 xdp_qp = 0, curr_qp;
4923         int i, err;
4924
4925         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
4926             && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4927                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
4928                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4929                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4930                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
4931                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
4932                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
4933                 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
4934                 return -EOPNOTSUPP;
4935         }
4936
4937         if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
4938                 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
4939                 return -EINVAL;
4940         }
4941
4942         if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
4943                 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
4944                 netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
4945                 return -EINVAL;
4946         }
4947
4948         curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
4949         if (prog)
4950                 xdp_qp = nr_cpu_ids;
4951
4952         /* XDP requires extra queues for XDP_TX */
4953         if (curr_qp + xdp_qp > vi->max_queue_pairs) {
4954                 netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
4955                                  curr_qp + xdp_qp, vi->max_queue_pairs);
4956                 xdp_qp = 0;
4957         }
4958
4959         old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
4960         if (!prog && !old_prog)
4961                 return 0;
4962
4963         if (prog)
4964                 bpf_prog_add(prog, vi->max_queue_pairs - 1);
4965
4966         /* Make sure NAPI is not using any XDP TX queues for RX. */
4967         if (netif_running(dev)) {
4968                 for (i = 0; i < vi->max_queue_pairs; i++) {
4969                         napi_disable(&vi->rq[i].napi);
4970                         virtnet_napi_tx_disable(&vi->sq[i].napi);
4971                 }
4972         }
4973
4974         if (!prog) {
4975                 for (i = 0; i < vi->max_queue_pairs; i++) {
4976                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
4977                         if (i == 0)
4978                                 virtnet_restore_guest_offloads(vi);
4979                 }
4980                 synchronize_net();
4981         }
4982
4983         err = virtnet_set_queues(vi, curr_qp + xdp_qp);
4984         if (err)
4985                 goto err;
4986         netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
4987         vi->xdp_queue_pairs = xdp_qp;
4988
4989         if (prog) {
4990                 vi->xdp_enabled = true;
4991                 for (i = 0; i < vi->max_queue_pairs; i++) {
4992                         rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
4993                         if (i == 0 && !old_prog)
4994                                 virtnet_clear_guest_offloads(vi);
4995                 }
4996                 if (!old_prog)
4997                         xdp_features_set_redirect_target(dev, true);
4998         } else {
4999                 xdp_features_clear_redirect_target(dev);
5000                 vi->xdp_enabled = false;
5001         }
5002
5003         for (i = 0; i < vi->max_queue_pairs; i++) {
5004                 if (old_prog)
5005                         bpf_prog_put(old_prog);
5006                 if (netif_running(dev)) {
5007                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
5008                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
5009                                                &vi->sq[i].napi);
5010                 }
5011         }
5012
5013         return 0;
5014
5015 err:
5016         if (!prog) {
5017                 virtnet_clear_guest_offloads(vi);
5018                 for (i = 0; i < vi->max_queue_pairs; i++)
5019                         rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
5020         }
5021
5022         if (netif_running(dev)) {
5023                 for (i = 0; i < vi->max_queue_pairs; i++) {
5024                         virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
5025                         virtnet_napi_tx_enable(vi, vi->sq[i].vq,
5026                                                &vi->sq[i].napi);
5027                 }
5028         }
5029         if (prog)
5030                 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
5031         return err;
5032 }
5033
5034 static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5035 {
5036         switch (xdp->command) {
5037         case XDP_SETUP_PROG:
5038                 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
5039         default:
5040                 return -EINVAL;
5041         }
5042 }
5043
5044 static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
5045                                       size_t len)
5046 {
5047         struct virtnet_info *vi = netdev_priv(dev);
5048         int ret;
5049
5050         if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
5051                 return -EOPNOTSUPP;
5052
5053         ret = snprintf(buf, len, "sby");
5054         if (ret >= len)
5055                 return -EOPNOTSUPP;
5056
5057         return 0;
5058 }
5059
5060 static int virtnet_set_features(struct net_device *dev,
5061                                 netdev_features_t features)
5062 {
5063         struct virtnet_info *vi = netdev_priv(dev);
5064         u64 offloads;
5065         int err;
5066
5067         if ((dev->features ^ features) & NETIF_F_GRO_HW) {
5068                 if (vi->xdp_enabled)
5069                         return -EBUSY;
5070
5071                 if (features & NETIF_F_GRO_HW)
5072                         offloads = vi->guest_offloads_capable;
5073                 else
5074                         offloads = vi->guest_offloads_capable &
5075                                    ~GUEST_OFFLOAD_GRO_HW_MASK;
5076
5077                 err = virtnet_set_guest_offloads(vi, offloads);
5078                 if (err)
5079                         return err;
5080                 vi->guest_offloads = offloads;
5081         }
5082
5083         if ((dev->features ^ features) & NETIF_F_RXHASH) {
5084                 if (features & NETIF_F_RXHASH)
5085                         vi->rss.hash_types = vi->rss_hash_types_saved;
5086                 else
5087                         vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
5088
5089                 if (!virtnet_commit_rss_command(vi))
5090                         return -EINVAL;
5091         }
5092
5093         return 0;
5094 }
5095
5096 static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
5097 {
5098         struct virtnet_info *priv = netdev_priv(dev);
5099         struct send_queue *sq = &priv->sq[txqueue];
5100         struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
5101
5102         u64_stats_update_begin(&sq->stats.syncp);
5103         u64_stats_inc(&sq->stats.tx_timeouts);
5104         u64_stats_update_end(&sq->stats.syncp);
5105
5106         netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
5107                    txqueue, sq->name, sq->vq->index, sq->vq->name,
5108                    jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
5109 }
5110
5111 static const struct net_device_ops virtnet_netdev = {
5112         .ndo_open            = virtnet_open,
5113         .ndo_stop            = virtnet_close,
5114         .ndo_start_xmit      = start_xmit,
5115         .ndo_validate_addr   = eth_validate_addr,
5116         .ndo_set_mac_address = virtnet_set_mac_address,
5117         .ndo_set_rx_mode     = virtnet_set_rx_mode,
5118         .ndo_get_stats64     = virtnet_stats,
5119         .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
5120         .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
5121         .ndo_bpf                = virtnet_xdp,
5122         .ndo_xdp_xmit           = virtnet_xdp_xmit,
5123         .ndo_features_check     = passthru_features_check,
5124         .ndo_get_phys_port_name = virtnet_get_phys_port_name,
5125         .ndo_set_features       = virtnet_set_features,
5126         .ndo_tx_timeout         = virtnet_tx_timeout,
5127 };
5128
5129 static void virtnet_config_changed_work(struct work_struct *work)
5130 {
5131         struct virtnet_info *vi =
5132                 container_of(work, struct virtnet_info, config_work);
5133         u16 v;
5134
5135         if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
5136                                  struct virtio_net_config, status, &v) < 0)
5137                 return;
5138
5139         if (v & VIRTIO_NET_S_ANNOUNCE) {
5140                 netdev_notify_peers(vi->dev);
5141                 virtnet_ack_link_announce(vi);
5142         }
5143
5144         /* Ignore unknown (future) status bits */
5145         v &= VIRTIO_NET_S_LINK_UP;
5146
5147         if (vi->status == v)
5148                 return;
5149
5150         vi->status = v;
5151
5152         if (vi->status & VIRTIO_NET_S_LINK_UP) {
5153                 virtnet_update_settings(vi);
5154                 netif_carrier_on(vi->dev);
5155                 netif_tx_wake_all_queues(vi->dev);
5156         } else {
5157                 netif_carrier_off(vi->dev);
5158                 netif_tx_stop_all_queues(vi->dev);
5159         }
5160 }
5161
5162 static void virtnet_config_changed(struct virtio_device *vdev)
5163 {
5164         struct virtnet_info *vi = vdev->priv;
5165
5166         schedule_work(&vi->config_work);
5167 }
5168
5169 static void virtnet_free_queues(struct virtnet_info *vi)
5170 {
5171         int i;
5172
5173         for (i = 0; i < vi->max_queue_pairs; i++) {
5174                 __netif_napi_del(&vi->rq[i].napi);
5175                 __netif_napi_del(&vi->sq[i].napi);
5176         }
5177
5178         /* We called __netif_napi_del(),
5179          * we need to respect an RCU grace period before freeing vi->rq
5180          */
5181         synchronize_net();
5182
5183         kfree(vi->rq);
5184         kfree(vi->sq);
5185         kfree(vi->ctrl);
5186 }
5187
5188 static void _free_receive_bufs(struct virtnet_info *vi)
5189 {
5190         struct bpf_prog *old_prog;
5191         int i;
5192
5193         for (i = 0; i < vi->max_queue_pairs; i++) {
5194                 while (vi->rq[i].pages)
5195                         __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
5196
5197                 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
5198                 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
5199                 if (old_prog)
5200                         bpf_prog_put(old_prog);
5201         }
5202 }
5203
5204 static void free_receive_bufs(struct virtnet_info *vi)
5205 {
5206         rtnl_lock();
5207         _free_receive_bufs(vi);
5208         rtnl_unlock();
5209 }
5210
5211 static void free_receive_page_frags(struct virtnet_info *vi)
5212 {
5213         int i;
5214         for (i = 0; i < vi->max_queue_pairs; i++)
5215                 if (vi->rq[i].alloc_frag.page) {
5216                         if (vi->rq[i].last_dma)
5217                                 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
5218                         put_page(vi->rq[i].alloc_frag.page);
5219                 }
5220 }
5221
5222 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
5223 {
5224         if (!is_xdp_frame(buf))
5225                 dev_kfree_skb(buf);
5226         else
5227                 xdp_return_frame(ptr_to_xdp(buf));
5228 }
5229
5230 static void free_unused_bufs(struct virtnet_info *vi)
5231 {
5232         void *buf;
5233         int i;
5234
5235         for (i = 0; i < vi->max_queue_pairs; i++) {
5236                 struct virtqueue *vq = vi->sq[i].vq;
5237                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
5238                         virtnet_sq_free_unused_buf(vq, buf);
5239                 cond_resched();
5240         }
5241
5242         for (i = 0; i < vi->max_queue_pairs; i++) {
5243                 struct virtqueue *vq = vi->rq[i].vq;
5244
5245                 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
5246                         virtnet_rq_unmap_free_buf(vq, buf);
5247                 cond_resched();
5248         }
5249 }
5250
5251 static void virtnet_del_vqs(struct virtnet_info *vi)
5252 {
5253         struct virtio_device *vdev = vi->vdev;
5254
5255         virtnet_clean_affinity(vi);
5256
5257         vdev->config->del_vqs(vdev);
5258
5259         virtnet_free_queues(vi);
5260 }
5261
5262 /* How large should a single buffer be so a queue full of these can fit at
5263  * least one full packet?
5264  * Logic below assumes the mergeable buffer header is used.
5265  */
5266 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
5267 {
5268         const unsigned int hdr_len = vi->hdr_len;
5269         unsigned int rq_size = virtqueue_get_vring_size(vq);
5270         unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
5271         unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
5272         unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
5273
5274         return max(max(min_buf_len, hdr_len) - hdr_len,
5275                    (unsigned int)GOOD_PACKET_LEN);
5276 }
5277
5278 static int virtnet_find_vqs(struct virtnet_info *vi)
5279 {
5280         vq_callback_t **callbacks;
5281         struct virtqueue **vqs;
5282         const char **names;
5283         int ret = -ENOMEM;
5284         int total_vqs;
5285         bool *ctx;
5286         u16 i;
5287
5288         /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
5289          * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
5290          * possible control vq.
5291          */
5292         total_vqs = vi->max_queue_pairs * 2 +
5293                     virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
5294
5295         /* Allocate space for find_vqs parameters */
5296         vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
5297         if (!vqs)
5298                 goto err_vq;
5299         callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
5300         if (!callbacks)
5301                 goto err_callback;
5302         names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
5303         if (!names)
5304                 goto err_names;
5305         if (!vi->big_packets || vi->mergeable_rx_bufs) {
5306                 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
5307                 if (!ctx)
5308                         goto err_ctx;
5309         } else {
5310                 ctx = NULL;
5311         }
5312
5313         /* Parameters for control virtqueue, if any */
5314         if (vi->has_cvq) {
5315                 callbacks[total_vqs - 1] = NULL;
5316                 names[total_vqs - 1] = "control";
5317         }
5318
5319         /* Allocate/initialize parameters for send/receive virtqueues */
5320         for (i = 0; i < vi->max_queue_pairs; i++) {
5321                 callbacks[rxq2vq(i)] = skb_recv_done;
5322                 callbacks[txq2vq(i)] = skb_xmit_done;
5323                 sprintf(vi->rq[i].name, "input.%u", i);
5324                 sprintf(vi->sq[i].name, "output.%u", i);
5325                 names[rxq2vq(i)] = vi->rq[i].name;
5326                 names[txq2vq(i)] = vi->sq[i].name;
5327                 if (ctx)
5328                         ctx[rxq2vq(i)] = true;
5329         }
5330
5331         ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
5332                                   names, ctx, NULL);
5333         if (ret)
5334                 goto err_find;
5335
5336         if (vi->has_cvq) {
5337                 vi->cvq = vqs[total_vqs - 1];
5338                 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
5339                         vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5340         }
5341
5342         for (i = 0; i < vi->max_queue_pairs; i++) {
5343                 vi->rq[i].vq = vqs[rxq2vq(i)];
5344                 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
5345                 vi->sq[i].vq = vqs[txq2vq(i)];
5346         }
5347
5348         /* run here: ret == 0. */
5349
5350
5351 err_find:
5352         kfree(ctx);
5353 err_ctx:
5354         kfree(names);
5355 err_names:
5356         kfree(callbacks);
5357 err_callback:
5358         kfree(vqs);
5359 err_vq:
5360         return ret;
5361 }
5362
5363 static int virtnet_alloc_queues(struct virtnet_info *vi)
5364 {
5365         int i;
5366
5367         if (vi->has_cvq) {
5368                 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
5369                 if (!vi->ctrl)
5370                         goto err_ctrl;
5371         } else {
5372                 vi->ctrl = NULL;
5373         }
5374         vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
5375         if (!vi->sq)
5376                 goto err_sq;
5377         vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
5378         if (!vi->rq)
5379                 goto err_rq;
5380
5381         INIT_DELAYED_WORK(&vi->refill, refill_work);
5382         for (i = 0; i < vi->max_queue_pairs; i++) {
5383                 vi->rq[i].pages = NULL;
5384                 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
5385                                       napi_weight);
5386                 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
5387                                          virtnet_poll_tx,
5388                                          napi_tx ? napi_weight : 0);
5389
5390                 INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
5391                 vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5392
5393                 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
5394                 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
5395                 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
5396
5397                 u64_stats_init(&vi->rq[i].stats.syncp);
5398                 u64_stats_init(&vi->sq[i].stats.syncp);
5399                 mutex_init(&vi->rq[i].dim_lock);
5400         }
5401
5402         return 0;
5403
5404 err_rq:
5405         kfree(vi->sq);
5406 err_sq:
5407         kfree(vi->ctrl);
5408 err_ctrl:
5409         return -ENOMEM;
5410 }
5411
5412 static int init_vqs(struct virtnet_info *vi)
5413 {
5414         int ret;
5415
5416         /* Allocate send & receive queues */
5417         ret = virtnet_alloc_queues(vi);
5418         if (ret)
5419                 goto err;
5420
5421         ret = virtnet_find_vqs(vi);
5422         if (ret)
5423                 goto err_free;
5424
5425         virtnet_rq_set_premapped(vi);
5426
5427         cpus_read_lock();
5428         virtnet_set_affinity(vi);
5429         cpus_read_unlock();
5430
5431         return 0;
5432
5433 err_free:
5434         virtnet_free_queues(vi);
5435 err:
5436         return ret;
5437 }
5438
5439 #ifdef CONFIG_SYSFS
5440 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
5441                 char *buf)
5442 {
5443         struct virtnet_info *vi = netdev_priv(queue->dev);
5444         unsigned int queue_index = get_netdev_rx_queue_index(queue);
5445         unsigned int headroom = virtnet_get_headroom(vi);
5446         unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
5447         struct ewma_pkt_len *avg;
5448
5449         BUG_ON(queue_index >= vi->max_queue_pairs);
5450         avg = &vi->rq[queue_index].mrg_avg_pkt_len;
5451         return sprintf(buf, "%u\n",
5452                        get_mergeable_buf_len(&vi->rq[queue_index], avg,
5453                                        SKB_DATA_ALIGN(headroom + tailroom)));
5454 }
5455
5456 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
5457         __ATTR_RO(mergeable_rx_buffer_size);
5458
5459 static struct attribute *virtio_net_mrg_rx_attrs[] = {
5460         &mergeable_rx_buffer_size_attribute.attr,
5461         NULL
5462 };
5463
5464 static const struct attribute_group virtio_net_mrg_rx_group = {
5465         .name = "virtio_net",
5466         .attrs = virtio_net_mrg_rx_attrs
5467 };
5468 #endif
5469
5470 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
5471                                     unsigned int fbit,
5472                                     const char *fname, const char *dname)
5473 {
5474         if (!virtio_has_feature(vdev, fbit))
5475                 return false;
5476
5477         dev_err(&vdev->dev, "device advertises feature %s but not %s",
5478                 fname, dname);
5479
5480         return true;
5481 }
5482
5483 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)                       \
5484         virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
5485
5486 static bool virtnet_validate_features(struct virtio_device *vdev)
5487 {
5488         if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
5489             (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
5490                              "VIRTIO_NET_F_CTRL_VQ") ||
5491              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
5492                              "VIRTIO_NET_F_CTRL_VQ") ||
5493              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
5494                              "VIRTIO_NET_F_CTRL_VQ") ||
5495              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
5496              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
5497                              "VIRTIO_NET_F_CTRL_VQ") ||
5498              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_RSS,
5499                              "VIRTIO_NET_F_CTRL_VQ") ||
5500              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_HASH_REPORT,
5501                              "VIRTIO_NET_F_CTRL_VQ") ||
5502              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_NOTF_COAL,
5503                              "VIRTIO_NET_F_CTRL_VQ") ||
5504              VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_VQ_NOTF_COAL,
5505                              "VIRTIO_NET_F_CTRL_VQ"))) {
5506                 return false;
5507         }
5508
5509         return true;
5510 }
5511
5512 #define MIN_MTU ETH_MIN_MTU
5513 #define MAX_MTU ETH_MAX_MTU
5514
5515 static int virtnet_validate(struct virtio_device *vdev)
5516 {
5517         if (!vdev->config->get) {
5518                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
5519                         __func__);
5520                 return -EINVAL;
5521         }
5522
5523         if (!virtnet_validate_features(vdev))
5524                 return -EINVAL;
5525
5526         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
5527                 int mtu = virtio_cread16(vdev,
5528                                          offsetof(struct virtio_net_config,
5529                                                   mtu));
5530                 if (mtu < MIN_MTU)
5531                         __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
5532         }
5533
5534         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
5535             !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
5536                 dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
5537                 __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
5538         }
5539
5540         return 0;
5541 }
5542
5543 static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
5544 {
5545         return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
5546                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
5547                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
5548                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
5549                 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
5550                 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
5551 }
5552
5553 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
5554 {
5555         bool guest_gso = virtnet_check_guest_gso(vi);
5556
5557         /* If device can receive ANY guest GSO packets, regardless of mtu,
5558          * allocate packets of maximum size, otherwise limit it to only
5559          * mtu size worth only.
5560          */
5561         if (mtu > ETH_DATA_LEN || guest_gso) {
5562                 vi->big_packets = true;
5563                 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
5564         }
5565 }
5566
5567 #define VIRTIO_NET_HASH_REPORT_MAX_TABLE      10
5568 static enum xdp_rss_hash_type
5569 virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = {
5570         [VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE,
5571         [VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4,
5572         [VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP,
5573         [VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP,
5574         [VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6,
5575         [VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP,
5576         [VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP,
5577         [VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
5578         [VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
5579         [VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX
5580 };
5581
5582 static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
5583                                enum xdp_rss_hash_type *rss_type)
5584 {
5585         const struct xdp_buff *xdp = (void *)_ctx;
5586         struct virtio_net_hdr_v1_hash *hdr_hash;
5587         struct virtnet_info *vi;
5588         u16 hash_report;
5589
5590         if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
5591                 return -ENODATA;
5592
5593         vi = netdev_priv(xdp->rxq->dev);
5594         hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len);
5595         hash_report = __le16_to_cpu(hdr_hash->hash_report);
5596
5597         if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE)
5598                 hash_report = VIRTIO_NET_HASH_REPORT_NONE;
5599
5600         *rss_type = virtnet_xdp_rss_type[hash_report];
5601         *hash = __le32_to_cpu(hdr_hash->hash_value);
5602         return 0;
5603 }
5604
5605 static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
5606         .xmo_rx_hash                    = virtnet_xdp_rx_hash,
5607 };
5608
5609 static int virtnet_probe(struct virtio_device *vdev)
5610 {
5611         int i, err = -ENOMEM;
5612         struct net_device *dev;
5613         struct virtnet_info *vi;
5614         u16 max_queue_pairs;
5615         int mtu = 0;
5616
5617         /* Find if host supports multiqueue/rss virtio_net device */
5618         max_queue_pairs = 1;
5619         if (virtio_has_feature(vdev, VIRTIO_NET_F_MQ) || virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
5620                 max_queue_pairs =
5621                      virtio_cread16(vdev, offsetof(struct virtio_net_config, max_virtqueue_pairs));
5622
5623         /* We need at least 2 queue's */
5624         if (max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
5625             max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
5626             !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
5627                 max_queue_pairs = 1;
5628
5629         /* Allocate ourselves a network device with room for our info */
5630         dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
5631         if (!dev)
5632                 return -ENOMEM;
5633
5634         /* Set up network device as normal. */
5635         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
5636                            IFF_TX_SKB_NO_LINEAR;
5637         dev->netdev_ops = &virtnet_netdev;
5638         dev->stat_ops = &virtnet_stat_ops;
5639         dev->features = NETIF_F_HIGHDMA;
5640
5641         dev->ethtool_ops = &virtnet_ethtool_ops;
5642         SET_NETDEV_DEV(dev, &vdev->dev);
5643
5644         /* Do we support "hardware" checksums? */
5645         if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
5646                 /* This opens up the world of extra features. */
5647                 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5648                 if (csum)
5649                         dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5650
5651                 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
5652                         dev->hw_features |= NETIF_F_TSO
5653                                 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
5654                 }
5655                 /* Individual feature bits: what can host handle? */
5656                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
5657                         dev->hw_features |= NETIF_F_TSO;
5658                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
5659                         dev->hw_features |= NETIF_F_TSO6;
5660                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
5661                         dev->hw_features |= NETIF_F_TSO_ECN;
5662                 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_USO))
5663                         dev->hw_features |= NETIF_F_GSO_UDP_L4;
5664
5665                 dev->features |= NETIF_F_GSO_ROBUST;
5666
5667                 if (gso)
5668                         dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
5669                 /* (!csum && gso) case will be fixed by register_netdev() */
5670         }
5671         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
5672                 dev->features |= NETIF_F_RXCSUM;
5673         if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
5674             virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
5675                 dev->features |= NETIF_F_GRO_HW;
5676         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
5677                 dev->hw_features |= NETIF_F_GRO_HW;
5678
5679         dev->vlan_features = dev->features;
5680         dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
5681
5682         /* MTU range: 68 - 65535 */
5683         dev->min_mtu = MIN_MTU;
5684         dev->max_mtu = MAX_MTU;
5685
5686         /* Configuration may specify what MAC to use.  Otherwise random. */
5687         if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
5688                 u8 addr[ETH_ALEN];
5689
5690                 virtio_cread_bytes(vdev,
5691                                    offsetof(struct virtio_net_config, mac),
5692                                    addr, ETH_ALEN);
5693                 eth_hw_addr_set(dev, addr);
5694         } else {
5695                 eth_hw_addr_random(dev);
5696                 dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
5697                          dev->dev_addr);
5698         }
5699
5700         /* Set up our device-specific information */
5701         vi = netdev_priv(dev);
5702         vi->dev = dev;
5703         vi->vdev = vdev;
5704         vdev->priv = vi;
5705
5706         INIT_WORK(&vi->config_work, virtnet_config_changed_work);
5707         INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
5708         spin_lock_init(&vi->refill_lock);
5709
5710         if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
5711                 vi->mergeable_rx_bufs = true;
5712                 dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
5713         }
5714
5715         if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
5716                 vi->has_rss_hash_report = true;
5717
5718         if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
5719                 vi->has_rss = true;
5720
5721                 vi->rss_indir_table_size =
5722                         virtio_cread16(vdev, offsetof(struct virtio_net_config,
5723                                 rss_max_indirection_table_length));
5724         }
5725
5726         if (vi->has_rss || vi->has_rss_hash_report) {
5727                 vi->rss_key_size =
5728                         virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
5729
5730                 vi->rss_hash_types_supported =
5731                     virtio_cread32(vdev, offsetof(struct virtio_net_config, supported_hash_types));
5732                 vi->rss_hash_types_supported &=
5733                                 ~(VIRTIO_NET_RSS_HASH_TYPE_IP_EX |
5734                                   VIRTIO_NET_RSS_HASH_TYPE_TCP_EX |
5735                                   VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
5736
5737                 dev->hw_features |= NETIF_F_RXHASH;
5738                 dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
5739         }
5740
5741         if (vi->has_rss_hash_report)
5742                 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
5743         else if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
5744                  virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
5745                 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
5746         else
5747                 vi->hdr_len = sizeof(struct virtio_net_hdr);
5748
5749         if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
5750             virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
5751                 vi->any_header_sg = true;
5752
5753         if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
5754                 vi->has_cvq = true;
5755
5756         mutex_init(&vi->cvq_lock);
5757
5758         if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
5759                 mtu = virtio_cread16(vdev,
5760                                      offsetof(struct virtio_net_config,
5761                                               mtu));
5762                 if (mtu < dev->min_mtu) {
5763                         /* Should never trigger: MTU was previously validated
5764                          * in virtnet_validate.
5765                          */
5766                         dev_err(&vdev->dev,
5767                                 "device MTU appears to have changed it is now %d < %d",
5768                                 mtu, dev->min_mtu);
5769                         err = -EINVAL;
5770                         goto free;
5771                 }
5772
5773                 dev->mtu = mtu;
5774                 dev->max_mtu = mtu;
5775         }
5776
5777         virtnet_set_big_packets(vi, mtu);
5778
5779         if (vi->any_header_sg)
5780                 dev->needed_headroom = vi->hdr_len;
5781
5782         /* Enable multiqueue by default */
5783         if (num_online_cpus() >= max_queue_pairs)
5784                 vi->curr_queue_pairs = max_queue_pairs;
5785         else
5786                 vi->curr_queue_pairs = num_online_cpus();
5787         vi->max_queue_pairs = max_queue_pairs;
5788
5789         /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
5790         err = init_vqs(vi);
5791         if (err)
5792                 goto free;
5793
5794         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
5795                 vi->intr_coal_rx.max_usecs = 0;
5796                 vi->intr_coal_tx.max_usecs = 0;
5797                 vi->intr_coal_rx.max_packets = 0;
5798
5799                 /* Keep the default values of the coalescing parameters
5800                  * aligned with the default napi_tx state.
5801                  */
5802                 if (vi->sq[0].napi.weight)
5803                         vi->intr_coal_tx.max_packets = 1;
5804                 else
5805                         vi->intr_coal_tx.max_packets = 0;
5806         }
5807
5808         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
5809                 /* The reason is the same as VIRTIO_NET_F_NOTF_COAL. */
5810                 for (i = 0; i < vi->max_queue_pairs; i++)
5811                         if (vi->sq[i].napi.weight)
5812                                 vi->sq[i].intr_coal.max_packets = 1;
5813         }
5814
5815 #ifdef CONFIG_SYSFS
5816         if (vi->mergeable_rx_bufs)
5817                 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
5818 #endif
5819         netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
5820         netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
5821
5822         virtnet_init_settings(dev);
5823
5824         if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
5825                 vi->failover = net_failover_create(vi->dev);
5826                 if (IS_ERR(vi->failover)) {
5827                         err = PTR_ERR(vi->failover);
5828                         goto free_vqs;
5829                 }
5830         }
5831
5832         if (vi->has_rss || vi->has_rss_hash_report)
5833                 virtnet_init_default_rss(vi);
5834
5835         enable_rx_mode_work(vi);
5836
5837         /* serialize netdev register + virtio_device_ready() with ndo_open() */
5838         rtnl_lock();
5839
5840         err = register_netdevice(dev);
5841         if (err) {
5842                 pr_debug("virtio_net: registering device failed\n");
5843                 rtnl_unlock();
5844                 goto free_failover;
5845         }
5846
5847         virtio_device_ready(vdev);
5848
5849         virtnet_set_queues(vi, vi->curr_queue_pairs);
5850
5851         /* a random MAC address has been assigned, notify the device.
5852          * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
5853          * because many devices work fine without getting MAC explicitly
5854          */
5855         if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
5856             virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
5857                 struct scatterlist sg;
5858
5859                 sg_init_one(&sg, dev->dev_addr, dev->addr_len);
5860                 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
5861                                           VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
5862                         pr_debug("virtio_net: setting MAC address failed\n");
5863                         rtnl_unlock();
5864                         err = -EINVAL;
5865                         goto free_unregister_netdev;
5866                 }
5867         }
5868
5869         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
5870                 struct virtio_net_stats_capabilities *stats_cap  __free(kfree) = NULL;
5871                 struct scatterlist sg;
5872                 __le64 v;
5873
5874                 stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
5875                 if (!stats_cap) {
5876                         rtnl_unlock();
5877                         err = -ENOMEM;
5878                         goto free_unregister_netdev;
5879                 }
5880
5881                 sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
5882
5883                 if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
5884                                                 VIRTIO_NET_CTRL_STATS_QUERY,
5885                                                 NULL, &sg)) {
5886                         pr_debug("virtio_net: fail to get stats capability\n");
5887                         rtnl_unlock();
5888                         err = -EINVAL;
5889                         goto free_unregister_netdev;
5890                 }
5891
5892                 v = stats_cap->supported_stats_types[0];
5893                 vi->device_stats_cap = le64_to_cpu(v);
5894         }
5895
5896         rtnl_unlock();
5897
5898         err = virtnet_cpu_notif_add(vi);
5899         if (err) {
5900                 pr_debug("virtio_net: registering cpu notifier failed\n");
5901                 goto free_unregister_netdev;
5902         }
5903
5904         /* Assume link up if device can't report link status,
5905            otherwise get link status from config. */
5906         netif_carrier_off(dev);
5907         if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
5908                 schedule_work(&vi->config_work);
5909         } else {
5910                 vi->status = VIRTIO_NET_S_LINK_UP;
5911                 virtnet_update_settings(vi);
5912                 netif_carrier_on(dev);
5913         }
5914
5915         for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
5916                 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
5917                         set_bit(guest_offloads[i], &vi->guest_offloads);
5918         vi->guest_offloads_capable = vi->guest_offloads;
5919
5920         pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
5921                  dev->name, max_queue_pairs);
5922
5923         return 0;
5924
5925 free_unregister_netdev:
5926         unregister_netdev(dev);
5927 free_failover:
5928         net_failover_destroy(vi->failover);
5929 free_vqs:
5930         virtio_reset_device(vdev);
5931         cancel_delayed_work_sync(&vi->refill);
5932         free_receive_page_frags(vi);
5933         virtnet_del_vqs(vi);
5934 free:
5935         free_netdev(dev);
5936         return err;
5937 }
5938
5939 static void remove_vq_common(struct virtnet_info *vi)
5940 {
5941         virtio_reset_device(vi->vdev);
5942
5943         /* Free unused buffers in both send and recv, if any. */
5944         free_unused_bufs(vi);
5945
5946         free_receive_bufs(vi);
5947
5948         free_receive_page_frags(vi);
5949
5950         virtnet_del_vqs(vi);
5951 }
5952
5953 static void virtnet_remove(struct virtio_device *vdev)
5954 {
5955         struct virtnet_info *vi = vdev->priv;
5956
5957         virtnet_cpu_notif_remove(vi);
5958
5959         /* Make sure no work handler is accessing the device. */
5960         flush_work(&vi->config_work);
5961         disable_rx_mode_work(vi);
5962         flush_work(&vi->rx_mode_work);
5963
5964         unregister_netdev(vi->dev);
5965
5966         net_failover_destroy(vi->failover);
5967
5968         remove_vq_common(vi);
5969
5970         free_netdev(vi->dev);
5971 }
5972
5973 static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
5974 {
5975         struct virtnet_info *vi = vdev->priv;
5976
5977         virtnet_cpu_notif_remove(vi);
5978         virtnet_freeze_down(vdev);
5979         remove_vq_common(vi);
5980
5981         return 0;
5982 }
5983
5984 static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
5985 {
5986         struct virtnet_info *vi = vdev->priv;
5987         int err;
5988
5989         err = virtnet_restore_up(vdev);
5990         if (err)
5991                 return err;
5992         virtnet_set_queues(vi, vi->curr_queue_pairs);
5993
5994         err = virtnet_cpu_notif_add(vi);
5995         if (err) {
5996                 virtnet_freeze_down(vdev);
5997                 remove_vq_common(vi);
5998                 return err;
5999         }
6000
6001         return 0;
6002 }
6003
6004 static struct virtio_device_id id_table[] = {
6005         { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
6006         { 0 },
6007 };
6008
6009 #define VIRTNET_FEATURES \
6010         VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
6011         VIRTIO_NET_F_MAC, \
6012         VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
6013         VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
6014         VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
6015         VIRTIO_NET_F_HOST_USO, VIRTIO_NET_F_GUEST_USO4, VIRTIO_NET_F_GUEST_USO6, \
6016         VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
6017         VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
6018         VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
6019         VIRTIO_NET_F_CTRL_MAC_ADDR, \
6020         VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
6021         VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
6022         VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
6023         VIRTIO_NET_F_VQ_NOTF_COAL, \
6024         VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
6025
6026 static unsigned int features[] = {
6027         VIRTNET_FEATURES,
6028 };
6029
6030 static unsigned int features_legacy[] = {
6031         VIRTNET_FEATURES,
6032         VIRTIO_NET_F_GSO,
6033         VIRTIO_F_ANY_LAYOUT,
6034 };
6035
6036 static struct virtio_driver virtio_net_driver = {
6037         .feature_table = features,
6038         .feature_table_size = ARRAY_SIZE(features),
6039         .feature_table_legacy = features_legacy,
6040         .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
6041         .driver.name =  KBUILD_MODNAME,
6042         .driver.owner = THIS_MODULE,
6043         .id_table =     id_table,
6044         .validate =     virtnet_validate,
6045         .probe =        virtnet_probe,
6046         .remove =       virtnet_remove,
6047         .config_changed = virtnet_config_changed,
6048 #ifdef CONFIG_PM_SLEEP
6049         .freeze =       virtnet_freeze,
6050         .restore =      virtnet_restore,
6051 #endif
6052 };
6053
6054 static __init int virtio_net_driver_init(void)
6055 {
6056         int ret;
6057
6058         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
6059                                       virtnet_cpu_online,
6060                                       virtnet_cpu_down_prep);
6061         if (ret < 0)
6062                 goto out;
6063         virtionet_online = ret;
6064         ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
6065                                       NULL, virtnet_cpu_dead);
6066         if (ret)
6067                 goto err_dead;
6068         ret = register_virtio_driver(&virtio_net_driver);
6069         if (ret)
6070                 goto err_virtio;
6071         return 0;
6072 err_virtio:
6073         cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
6074 err_dead:
6075         cpuhp_remove_multi_state(virtionet_online);
6076 out:
6077         return ret;
6078 }
6079 module_init(virtio_net_driver_init);
6080
6081 static __exit void virtio_net_driver_exit(void)
6082 {
6083         unregister_virtio_driver(&virtio_net_driver);
6084         cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
6085         cpuhp_remove_multi_state(virtionet_online);
6086 }
6087 module_exit(virtio_net_driver_exit);
6088
6089 MODULE_DEVICE_TABLE(virtio, id_table);
6090 MODULE_DESCRIPTION("Virtio network driver");
6091 MODULE_LICENSE("GPL");
This page took 0.385517 seconds and 4 git commands to generate.