drivers/net/xen-netfront.c
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 #include <linux/bpf.h>
48 #include <net/page_pool/types.h>
49 #include <linux/bpf_trace.h>
50
51 #include <xen/xen.h>
52 #include <xen/xenbus.h>
53 #include <xen/events.h>
54 #include <xen/page.h>
55 #include <xen/platform_pci.h>
56 #include <xen/grant_table.h>
57
58 #include <xen/interface/io/netif.h>
59 #include <xen/interface/memory.h>
60 #include <xen/interface/grant_table.h>
61
62 /* Module parameters */
63 #define MAX_QUEUES_DEFAULT 8
64 static unsigned int xennet_max_queues;
65 module_param_named(max_queues, xennet_max_queues, uint, 0644);
66 MODULE_PARM_DESC(max_queues,
67                  "Maximum number of queues per virtual interface");
68
69 static bool __read_mostly xennet_trusted = true;
70 module_param_named(trusted, xennet_trusted, bool, 0644);
71 MODULE_PARM_DESC(trusted, "Is the backend trusted");
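
/*
 * Illustrative usage (not driver code): both parameters can be given at
 * load time,
 *
 *     modprobe xen-netfront max_queues=4 trusted=0
 *
 * and, being mode 0644, adjusted afterwards through
 * /sys/module/xen_netfront/parameters/{max_queues,trusted}.
 */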
72
73 #define XENNET_TIMEOUT  (5 * HZ)
74
75 static const struct ethtool_ops xennet_ethtool_ops;
76
77 struct netfront_cb {
78         int pull_to;
79 };
80
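/* Per-skb private state, carried in the skb control buffer (skb->cb). */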
81 #define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))
82
83 #define RX_COPY_THRESHOLD 256
84
85 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
86 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
87
88 /* Minimum number of Rx slots (includes slot for GSO metadata). */
89 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
90
91 /* Queue name is interface name with "-qNNN" appended */
92 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
93
94 /* IRQ name is queue name with "-tx" or "-rx" appended */
95 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
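/* For "eth0", queue 0 is "eth0-q0", with irqs "eth0-q0-tx"/"eth0-q0-rx". */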
96
97 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
98
99 struct netfront_stats {
100         u64                     packets;
101         u64                     bytes;
102         struct u64_stats_sync   syncp;
103 };
104
105 struct netfront_info;
106
107 struct netfront_queue {
108         unsigned int id; /* Queue ID, 0-based */
109         char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
110         struct netfront_info *info;
111
112         struct bpf_prog __rcu *xdp_prog;
113
114         struct napi_struct napi;
115
116         /* Split event channel support; tx_* == rx_* when a single
117          * event channel is shared between tx and rx.
118          */
119         unsigned int tx_evtchn, rx_evtchn;
120         unsigned int tx_irq, rx_irq;
121         /* Only used when split event channels support is enabled */
122         char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
123         char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
124
125         spinlock_t   tx_lock;
126         struct xen_netif_tx_front_ring tx;
127         int tx_ring_ref;
128
129         /*
130          * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
131          * are linked from tx_skb_freelist through tx_link.
132          */
133         struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
134         unsigned short tx_link[NET_TX_RING_SIZE];
135 #define TX_LINK_NONE 0xffff
136 #define TX_PENDING   0xfffe
137         grant_ref_t gref_tx_head;
138         grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
139         struct page *grant_tx_page[NET_TX_RING_SIZE];
140         unsigned tx_skb_freelist;
141         unsigned int tx_pend_queue;
142
143         spinlock_t   rx_lock ____cacheline_aligned_in_smp;
144         struct xen_netif_rx_front_ring rx;
145         int rx_ring_ref;
146
147         struct timer_list rx_refill_timer;
148
149         struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
150         grant_ref_t gref_rx_head;
151         grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
152
153         unsigned int rx_rsp_unconsumed;
154         spinlock_t rx_cons_lock;
155
156         struct page_pool *page_pool;
157         struct xdp_rxq_info xdp_rxq;
158 };
159
160 struct netfront_info {
161         struct list_head list;
162         struct net_device *netdev;
163
164         struct xenbus_device *xbdev;
165
166         /* Multi-queue support */
167         struct netfront_queue *queues;
168
169         /* Statistics */
170         struct netfront_stats __percpu *rx_stats;
171         struct netfront_stats __percpu *tx_stats;
172
173         /* XDP state */
174         bool netback_has_xdp_headroom;
175         bool netfront_xdp_enabled;
176
177         /* Is the device behaving sanely? */
178         bool broken;
179
180         /* Should skbs be bounced into a zeroed buffer? */
181         bool bounce;
182
183         atomic_t rx_gso_checksum_fixup;
184 };
185
186 struct netfront_rx_info {
187         struct xen_netif_rx_response rx;
188         struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
189 };
190
191 /*
192  * Helpers for acquiring and freeing slots in tx_skbs[].
193  */
194
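/*
 * Push @id onto the singly linked list rooted at *head: the previous
 * head is stored in list[id] (the tx_link[] array) and @id becomes the
 * new head.
 */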
195 static void add_id_to_list(unsigned *head, unsigned short *list,
196                            unsigned short id)
197 {
198         list[id] = *head;
199         *head = id;
200 }
201
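/*
 * Pop and return the head of the list, unlinking the entry, or return
 * TX_LINK_NONE when the list is empty.
 */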
202 static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
203 {
204         unsigned int id = *head;
205
206         if (id != TX_LINK_NONE) {
207                 *head = list[id];
208                 list[id] = TX_LINK_NONE;
209         }
210         return id;
211 }
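
/*
 * Illustrative round trip (standalone sketch, not driver code), with
 * head initialised to TX_LINK_NONE:
 *
 *     add_id_to_list(&head, link, 3);      head: 3 -> NONE
 *     add_id_to_list(&head, link, 7);      head: 7 -> 3 -> NONE
 *     id = get_id_from_list(&head, link);  id == 7, head: 3 -> NONE
 */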
212
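/*
 * NET_RX_RING_SIZE is a power of two, so masking maps a free-running
 * ring index onto an rx_skbs[] / grant_rx_ref[] array index.
 */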
213 static int xennet_rxidx(RING_IDX idx)
214 {
215         return idx & (NET_RX_RING_SIZE - 1);
216 }
217
218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
219                                          RING_IDX ri)
220 {
221         int i = xennet_rxidx(ri);
222         struct sk_buff *skb = queue->rx_skbs[i];
223         queue->rx_skbs[i] = NULL;
224         return skb;
225 }
226
227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
228                                             RING_IDX ri)
229 {
230         int i = xennet_rxidx(ri);
231         grant_ref_t ref = queue->grant_rx_ref[i];
232         queue->grant_rx_ref[i] = INVALID_GRANT_REF;
233         return ref;
234 }
235
236 #ifdef CONFIG_SYSFS
237 static const struct attribute_group xennet_dev_group;
238 #endif
239
240 static bool xennet_can_sg(struct net_device *dev)
241 {
242         return dev->features & NETIF_F_SG;
243 }
244
245
246 static void rx_refill_timeout(struct timer_list *t)
247 {
248         struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
249         napi_schedule(&queue->napi);
250 }
251
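/*
 * True while more than XEN_NETIF_NR_SLOTS_MIN + 1 tx slots remain
 * unused: headroom for a worst-case packet plus one extra-info slot.
 */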
252 static int netfront_tx_slot_available(struct netfront_queue *queue)
253 {
254         return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
255                 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
256 }
257
258 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
259 {
260         struct net_device *dev = queue->info->netdev;
261         struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
262
263         if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
264             netfront_tx_slot_available(queue) &&
265             likely(netif_running(dev)))
266                 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
267 }
268
269
270 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
271 {
272         struct sk_buff *skb;
273         struct page *page;
274
275         skb = __netdev_alloc_skb(queue->info->netdev,
276                                  RX_COPY_THRESHOLD + NET_IP_ALIGN,
277                                  GFP_ATOMIC | __GFP_NOWARN);
278         if (unlikely(!skb))
279                 return NULL;
280
281         page = page_pool_alloc_pages(queue->page_pool,
282                                      GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
283         if (unlikely(!page)) {
284                 kfree_skb(skb);
285                 return NULL;
286         }
287         skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
288         skb_mark_for_recycle(skb);
289
290         /* Align the IP header to a 16-byte boundary */
291         skb_reserve(skb, NET_IP_ALIGN);
292         skb->dev = queue->info->netdev;
293
294         return skb;
295 }
296
297
298 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
299 {
300         RING_IDX req_prod = queue->rx.req_prod_pvt;
301         int notify;
302         int err = 0;
303
304         if (unlikely(!netif_carrier_ok(queue->info->netdev)))
305                 return;
306
307         for (req_prod = queue->rx.req_prod_pvt;
308              req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
309              req_prod++) {
310                 struct sk_buff *skb;
311                 unsigned short id;
312                 grant_ref_t ref;
313                 struct page *page;
314                 struct xen_netif_rx_request *req;
315
316                 skb = xennet_alloc_one_rx_buffer(queue);
317                 if (!skb) {
318                         err = -ENOMEM;
319                         break;
320                 }
321
322                 id = xennet_rxidx(req_prod);
323
324                 BUG_ON(queue->rx_skbs[id]);
325                 queue->rx_skbs[id] = skb;
326
327                 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
328                 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
329                 queue->grant_rx_ref[id] = ref;
330
331                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
332
333                 req = RING_GET_REQUEST(&queue->rx, req_prod);
334                 gnttab_page_grant_foreign_access_ref_one(ref,
335                                                          queue->info->xbdev->otherend_id,
336                                                          page,
337                                                          0);
338                 req->id = id;
339                 req->gref = ref;
340         }
341
342         queue->rx.req_prod_pvt = req_prod;
343
344         /* Try again later if there are not enough requests or skb allocation
345          * failed.
346          * "Enough requests" means the sum of the newly created slots and the
347          * slots still unconsumed by the backend.
348          */
349         if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
350             unlikely(err)) {
351                 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
352                 return;
353         }
354
355         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
356         if (notify)
357                 notify_remote_via_irq(queue->rx_irq);
358 }
359
360 static int xennet_open(struct net_device *dev)
361 {
362         struct netfront_info *np = netdev_priv(dev);
363         unsigned int num_queues = dev->real_num_tx_queues;
364         unsigned int i = 0;
365         struct netfront_queue *queue = NULL;
366
367         if (!np->queues || np->broken)
368                 return -ENODEV;
369
370         for (i = 0; i < num_queues; ++i) {
371                 queue = &np->queues[i];
372                 napi_enable(&queue->napi);
373
374                 spin_lock_bh(&queue->rx_lock);
375                 if (netif_carrier_ok(dev)) {
376                         xennet_alloc_rx_buffers(queue);
377                         queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
378                         if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
379                                 napi_schedule(&queue->napi);
380                 }
381                 spin_unlock_bh(&queue->rx_lock);
382         }
383
384         netif_tx_start_all_queues(dev);
385
386         return 0;
387 }
388
389 static bool xennet_tx_buf_gc(struct netfront_queue *queue)
390 {
391         RING_IDX cons, prod;
392         unsigned short id;
393         struct sk_buff *skb;
394         bool more_to_do;
395         bool work_done = false;
396         const struct device *dev = &queue->info->netdev->dev;
397
398         BUG_ON(!netif_carrier_ok(queue->info->netdev));
399
400         do {
401                 prod = queue->tx.sring->rsp_prod;
402                 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
403                         dev_alert(dev, "Illegal number of responses %u\n",
404                                   prod - queue->tx.rsp_cons);
405                         goto err;
406                 }
407                 rmb(); /* Ensure we see responses up to 'rp'. */
408
409                 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
410                         struct xen_netif_tx_response txrsp;
411
412                         work_done = true;
413
414                         RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
415                         if (txrsp.status == XEN_NETIF_RSP_NULL)
416                                 continue;
417
418                         id = txrsp.id;
419                         if (id >= RING_SIZE(&queue->tx)) {
420                                 dev_alert(dev,
421                                           "Response has incorrect id (%u)\n",
422                                           id);
423                                 goto err;
424                         }
425                         if (queue->tx_link[id] != TX_PENDING) {
426                                 dev_alert(dev,
427                                           "Response for inactive request\n");
428                                 goto err;
429                         }
430
431                         queue->tx_link[id] = TX_LINK_NONE;
432                         skb = queue->tx_skbs[id];
433                         queue->tx_skbs[id] = NULL;
434                         if (unlikely(!gnttab_end_foreign_access_ref(
435                                 queue->grant_tx_ref[id]))) {
436                                 dev_alert(dev,
437                                           "Grant still in use by backend domain\n");
438                                 goto err;
439                         }
440                         gnttab_release_grant_reference(
441                                 &queue->gref_tx_head, queue->grant_tx_ref[id]);
442                         queue->grant_tx_ref[id] = INVALID_GRANT_REF;
443                         queue->grant_tx_page[id] = NULL;
444                         add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
445                         dev_kfree_skb_irq(skb);
446                 }
447
448                 queue->tx.rsp_cons = prod;
449
450                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
451         } while (more_to_do);
452
453         xennet_maybe_wake_tx(queue);
454
455         return work_done;
456
457  err:
458         queue->info->broken = true;
459         dev_alert(dev, "Disabled for further use\n");
460
461         return work_done;
462 }
463
464 struct xennet_gnttab_make_txreq {
465         struct netfront_queue *queue;
466         struct sk_buff *skb;
467         struct page *page;
468         struct xen_netif_tx_request *tx;      /* Last request on ring page */
469         struct xen_netif_tx_request tx_local; /* Last request, local copy */
470         unsigned int size;
471 };
472
473 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
474                                   unsigned int len, void *data)
475 {
476         struct xennet_gnttab_make_txreq *info = data;
477         unsigned int id;
478         struct xen_netif_tx_request *tx;
479         grant_ref_t ref;
480         /* convenient aliases */
481         struct page *page = info->page;
482         struct netfront_queue *queue = info->queue;
483         struct sk_buff *skb = info->skb;
484
485         id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
486         tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
487         ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
488         WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
489
490         gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
491                                         gfn, GNTMAP_readonly);
492
493         queue->tx_skbs[id] = skb;
494         queue->grant_tx_page[id] = page;
495         queue->grant_tx_ref[id] = ref;
496
497         info->tx_local.id = id;
498         info->tx_local.gref = ref;
499         info->tx_local.offset = offset;
500         info->tx_local.size = len;
501         info->tx_local.flags = 0;
502
503         *tx = info->tx_local;
504
505         /*
506          * Put the request on the pending queue; it is marked TX_PENDING
507          * just before the producer index is pushed to the backend.
508          */
509         add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
510
511         info->tx = tx;
512         info->size += info->tx_local.size;
513 }
514
515 static struct xen_netif_tx_request *xennet_make_first_txreq(
516         struct xennet_gnttab_make_txreq *info,
517         unsigned int offset, unsigned int len)
518 {
519         info->size = 0;
520
521         gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
522
523         return info->tx;
524 }
525
526 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
527                                   unsigned int len, void *data)
528 {
529         struct xennet_gnttab_make_txreq *info = data;
530
531         info->tx->flags |= XEN_NETTXF_more_data;
532         skb_get(info->skb);
533         xennet_tx_setup_grant(gfn, offset, len, data);
534 }
535
536 static void xennet_make_txreqs(
537         struct xennet_gnttab_make_txreq *info,
538         struct page *page,
539         unsigned int offset, unsigned int len)
540 {
541         /* Skip any pages wholly covered by the starting offset */
542         page += offset >> PAGE_SHIFT;
543         offset &= ~PAGE_MASK;
544
545         while (len) {
546                 info->page = page;
547                 info->size = 0;
548
549                 gnttab_foreach_grant_in_range(page, offset, len,
550                                               xennet_make_one_txreq,
551                                               info);
552
553                 page++;
554                 offset = 0;
555                 len -= info->size;
556         }
557 }
558
559 /*
560  * Count how many ring slots are required to send this skb. Each frag
561  * might be a compound page.
562  */
563 static int xennet_count_skb_slots(struct sk_buff *skb)
564 {
565         int i, frags = skb_shinfo(skb)->nr_frags;
566         int slots;
567
568         slots = gnttab_count_grant(offset_in_page(skb->data),
569                                    skb_headlen(skb));
570
571         for (i = 0; i < frags; i++) {
572                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
573                 unsigned long size = skb_frag_size(frag);
574                 unsigned long offset = skb_frag_off(frag);
575
576                 /* Only the offset within the first page affects the slot count */
577                 offset &= ~PAGE_MASK;
578
579                 slots += gnttab_count_grant(offset, size);
580         }
581
582         return slots;
583 }
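
/*
 * Worked example (illustrative, assuming XEN_PAGE_SIZE == 4096): 2000
 * linear bytes starting at in-page offset 3000 cross a page boundary
 * and cost two slots; the same 2000 bytes at offset 0 cost one.
 */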
584
585 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
586                                struct net_device *sb_dev)
587 {
588         unsigned int num_queues = dev->real_num_tx_queues;
589         u32 hash;
590         u16 queue_idx;
591
592         /* First, check if there is only one queue */
593         if (num_queues == 1) {
594                 queue_idx = 0;
595         } else {
596                 hash = skb_get_hash(skb);
597                 queue_idx = hash % num_queues;
598         }
599
600         return queue_idx;
601 }
602
603 static void xennet_mark_tx_pending(struct netfront_queue *queue)
604 {
605         unsigned int i;
606
607         while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
608                TX_LINK_NONE)
609                 queue->tx_link[i] = TX_PENDING;
610 }
611
612 static int xennet_xdp_xmit_one(struct net_device *dev,
613                                struct netfront_queue *queue,
614                                struct xdp_frame *xdpf)
615 {
616         struct netfront_info *np = netdev_priv(dev);
617         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
618         struct xennet_gnttab_make_txreq info = {
619                 .queue = queue,
620                 .skb = NULL,
621                 .page = virt_to_page(xdpf->data),
622         };
623         int notify;
624
625         xennet_make_first_txreq(&info,
626                                 offset_in_page(xdpf->data),
627                                 xdpf->len);
628
629         xennet_mark_tx_pending(queue);
630
631         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
632         if (notify)
633                 notify_remote_via_irq(queue->tx_irq);
634
635         u64_stats_update_begin(&tx_stats->syncp);
636         tx_stats->bytes += xdpf->len;
637         tx_stats->packets++;
638         u64_stats_update_end(&tx_stats->syncp);
639
640         xennet_tx_buf_gc(queue);
641
642         return 0;
643 }
644
645 static int xennet_xdp_xmit(struct net_device *dev, int n,
646                            struct xdp_frame **frames, u32 flags)
647 {
648         unsigned int num_queues = dev->real_num_tx_queues;
649         struct netfront_info *np = netdev_priv(dev);
650         struct netfront_queue *queue = NULL;
651         unsigned long irq_flags;
652         int nxmit = 0;
653         int i;
654
655         if (unlikely(np->broken))
656                 return -ENODEV;
657         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
658                 return -EINVAL;
659
660         queue = &np->queues[smp_processor_id() % num_queues];
661
662         spin_lock_irqsave(&queue->tx_lock, irq_flags);
663         for (i = 0; i < n; i++) {
664                 struct xdp_frame *xdpf = frames[i];
665
666                 if (!xdpf)
667                         continue;
668                 if (xennet_xdp_xmit_one(dev, queue, xdpf))
669                         break;
670                 nxmit++;
671         }
672         spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
673
674         return nxmit;
675 }
676
677 static struct sk_buff *bounce_skb(const struct sk_buff *skb)
678 {
679         unsigned int headerlen = skb_headroom(skb);
680         /* Align size to allocate full pages and avoid contiguous data leaks */
681         unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
682                                   XEN_PAGE_SIZE);
683         struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
684
685         if (!n)
686                 return NULL;
687
688         if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
689                 WARN_ONCE(1, "misaligned skb allocated\n");
690                 kfree_skb(n);
691                 return NULL;
692         }
693
694         /* Set the data pointer */
695         skb_reserve(n, headerlen);
696         /* Set the tail pointer and length */
697         skb_put(n, skb->len);
698
699         BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
700
701         skb_copy_header(n, skb);
702         return n;
703 }
704
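/*
 * A maximally sized (64 KiB) packet spans 65536 / XEN_PAGE_SIZE pages,
 * plus one more page when the data is not page aligned.
 */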
705 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
706
707 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
708 {
709         struct netfront_info *np = netdev_priv(dev);
710         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
711         struct xen_netif_tx_request *first_tx;
712         unsigned int i;
713         int notify;
714         int slots;
715         struct page *page;
716         unsigned int offset;
717         unsigned int len;
718         unsigned long flags;
719         struct netfront_queue *queue = NULL;
720         struct xennet_gnttab_make_txreq info = { };
721         unsigned int num_queues = dev->real_num_tx_queues;
722         u16 queue_index;
723         struct sk_buff *nskb;
724
725         /* Drop the packet if no queues are set up */
726         if (num_queues < 1)
727                 goto drop;
728         if (unlikely(np->broken))
729                 goto drop;
730         /* Determine which queue to transmit this SKB on */
731         queue_index = skb_get_queue_mapping(skb);
732         queue = &np->queues[queue_index];
733
734         /* If skb->len is too big for wire format, drop skb and alert
735          * user about misconfiguration.
736          */
737         if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
738                 net_alert_ratelimited(
739                         "xennet: skb->len = %u, too big for wire format\n",
740                         skb->len);
741                 goto drop;
742         }
743
744         slots = xennet_count_skb_slots(skb);
745         if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
746                 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
747                                     slots, skb->len);
748                 if (skb_linearize(skb))
749                         goto drop;
750         }
751
752         page = virt_to_page(skb->data);
753         offset = offset_in_page(skb->data);
754
755         /* The first req should be at least ETH_HLEN size or the packet will be
756          * dropped by netback.
757          *
758          * If the backend is not trusted bounce all data to zeroed pages to
759          * avoid exposing contiguous data on the granted page not belonging to
760          * the skb.
761          */
762         if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
763                 nskb = bounce_skb(skb);
764                 if (!nskb)
765                         goto drop;
766                 dev_consume_skb_any(skb);
767                 skb = nskb;
768                 page = virt_to_page(skb->data);
769                 offset = offset_in_page(skb->data);
770         }
771
772         len = skb_headlen(skb);
773
774         spin_lock_irqsave(&queue->tx_lock, flags);
775
776         if (unlikely(!netif_carrier_ok(dev) ||
777                      (slots > 1 && !xennet_can_sg(dev)) ||
778                      netif_needs_gso(skb, netif_skb_features(skb)))) {
779                 spin_unlock_irqrestore(&queue->tx_lock, flags);
780                 goto drop;
781         }
782
783         /* First request for the linear area. */
784         info.queue = queue;
785         info.skb = skb;
786         info.page = page;
787         first_tx = xennet_make_first_txreq(&info, offset, len);
788         offset += info.tx_local.size;
789         if (offset == PAGE_SIZE) {
790                 page++;
791                 offset = 0;
792         }
793         len -= info.tx_local.size;
794
795         if (skb->ip_summed == CHECKSUM_PARTIAL)
796                 /* local packet? */
797                 first_tx->flags |= XEN_NETTXF_csum_blank |
798                                    XEN_NETTXF_data_validated;
799         else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
800                 /* remote but checksummed. */
801                 first_tx->flags |= XEN_NETTXF_data_validated;
802
803         /* Optional extra info after the first request. */
804         if (skb_shinfo(skb)->gso_size) {
805                 struct xen_netif_extra_info *gso;
806
807                 gso = (struct xen_netif_extra_info *)
808                         RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
809
810                 first_tx->flags |= XEN_NETTXF_extra_info;
811
812                 gso->u.gso.size = skb_shinfo(skb)->gso_size;
813                 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
814                         XEN_NETIF_GSO_TYPE_TCPV6 :
815                         XEN_NETIF_GSO_TYPE_TCPV4;
816                 gso->u.gso.pad = 0;
817                 gso->u.gso.features = 0;
818
819                 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
820                 gso->flags = 0;
821         }
822
823         /* Requests for the rest of the linear area. */
824         xennet_make_txreqs(&info, page, offset, len);
825
826         /* Requests for all the frags. */
827         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
828                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
829                 xennet_make_txreqs(&info, skb_frag_page(frag),
830                                         skb_frag_off(frag),
831                                         skb_frag_size(frag));
832         }
833
834         /* First request has the packet length. */
835         first_tx->size = skb->len;
836
837         /* timestamp packet in software */
838         skb_tx_timestamp(skb);
839
840         xennet_mark_tx_pending(queue);
841
842         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
843         if (notify)
844                 notify_remote_via_irq(queue->tx_irq);
845
846         u64_stats_update_begin(&tx_stats->syncp);
847         tx_stats->bytes += skb->len;
848         tx_stats->packets++;
849         u64_stats_update_end(&tx_stats->syncp);
850
851         /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
852         xennet_tx_buf_gc(queue);
853
854         if (!netfront_tx_slot_available(queue))
855                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
856
857         spin_unlock_irqrestore(&queue->tx_lock, flags);
858
859         return NETDEV_TX_OK;
860
861  drop:
862         dev->stats.tx_dropped++;
863         dev_kfree_skb_any(skb);
864         return NETDEV_TX_OK;
865 }
866
867 static int xennet_close(struct net_device *dev)
868 {
869         struct netfront_info *np = netdev_priv(dev);
870         unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
871         unsigned int i;
872         struct netfront_queue *queue;
873         netif_tx_stop_all_queues(np->netdev);
874         for (i = 0; i < num_queues; ++i) {
875                 queue = &np->queues[i];
876                 napi_disable(&queue->napi);
877         }
878         return 0;
879 }
880
881 static void xennet_destroy_queues(struct netfront_info *info)
882 {
883         unsigned int i;
884
885         if (!info->queues)
886                 return;
887
888         for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
889                 struct netfront_queue *queue = &info->queues[i];
890
891                 if (netif_running(info->netdev))
892                         napi_disable(&queue->napi);
893                 netif_napi_del(&queue->napi);
894         }
895
896         kfree(info->queues);
897         info->queues = NULL;
898 }
899
900 static void xennet_uninit(struct net_device *dev)
901 {
902         struct netfront_info *np = netdev_priv(dev);
903         xennet_destroy_queues(np);
904 }
905
906 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
907 {
908         unsigned long flags;
909
910         spin_lock_irqsave(&queue->rx_cons_lock, flags);
911         queue->rx.rsp_cons = val;
912         queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
913         spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
914 }
915
916 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
917                                 grant_ref_t ref)
918 {
919         int new = xennet_rxidx(queue->rx.req_prod_pvt);
920
921         BUG_ON(queue->rx_skbs[new]);
922         queue->rx_skbs[new] = skb;
923         queue->grant_rx_ref[new] = ref;
924         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
925         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
926         queue->rx.req_prod_pvt++;
927 }
928
929 static int xennet_get_extras(struct netfront_queue *queue,
930                              struct xen_netif_extra_info *extras,
931                              RING_IDX rp)
932
933 {
934         struct xen_netif_extra_info extra;
935         struct device *dev = &queue->info->netdev->dev;
936         RING_IDX cons = queue->rx.rsp_cons;
937         int err = 0;
938
939         do {
940                 struct sk_buff *skb;
941                 grant_ref_t ref;
942
943                 if (unlikely(cons + 1 == rp)) {
944                         if (net_ratelimit())
945                                 dev_warn(dev, "Missing extra info\n");
946                         err = -EBADR;
947                         break;
948                 }
949
950                 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
951
952                 if (unlikely(!extra.type ||
953                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
954                         if (net_ratelimit())
955                                 dev_warn(dev, "Invalid extra type: %d\n",
956                                          extra.type);
957                         err = -EINVAL;
958                 } else {
959                         extras[extra.type - 1] = extra;
960                 }
961
962                 skb = xennet_get_rx_skb(queue, cons);
963                 ref = xennet_get_rx_ref(queue, cons);
964                 xennet_move_rx_slot(queue, skb, ref);
965         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
966
967         xennet_set_rx_rsp_cons(queue, cons);
968         return err;
969 }
970
971 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
972                    struct xen_netif_rx_response *rx, struct bpf_prog *prog,
973                    struct xdp_buff *xdp, bool *need_xdp_flush)
974 {
975         struct xdp_frame *xdpf;
976         u32 len = rx->status;
977         u32 act;
978         int err;
979
980         xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
981                       &queue->xdp_rxq);
982         xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
983                          len, false);
984
985         act = bpf_prog_run_xdp(prog, xdp);
986         switch (act) {
987         case XDP_TX:
988                 get_page(pdata);
989                 xdpf = xdp_convert_buff_to_frame(xdp);
990                 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
991                 if (unlikely(!err))
992                         xdp_return_frame_rx_napi(xdpf);
993                 else if (unlikely(err < 0))
994                         trace_xdp_exception(queue->info->netdev, prog, act);
995                 break;
996         case XDP_REDIRECT:
997                 get_page(pdata);
998                 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
999                 *need_xdp_flush = true;
1000                 if (unlikely(err))
1001                         trace_xdp_exception(queue->info->netdev, prog, act);
1002                 break;
1003         case XDP_PASS:
1004         case XDP_DROP:
1005                 break;
1006
1007         case XDP_ABORTED:
1008                 trace_xdp_exception(queue->info->netdev, prog, act);
1009                 break;
1010
1011         default:
1012                 bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
1013         }
1014
1015         return act;
1016 }
1017
1018 static int xennet_get_responses(struct netfront_queue *queue,
1019                                 struct netfront_rx_info *rinfo, RING_IDX rp,
1020                                 struct sk_buff_head *list,
1021                                 bool *need_xdp_flush)
1022 {
1023         struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1024         int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
1025         RING_IDX cons = queue->rx.rsp_cons;
1026         struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1027         struct xen_netif_extra_info *extras = rinfo->extras;
1028         grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1029         struct device *dev = &queue->info->netdev->dev;
1030         struct bpf_prog *xdp_prog;
1031         struct xdp_buff xdp;
1032         int slots = 1;
1033         int err = 0;
1034         u32 verdict;
1035
1036         if (rx->flags & XEN_NETRXF_extra_info) {
1037                 err = xennet_get_extras(queue, extras, rp);
1038                 if (!err) {
1039                         if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1040                                 struct xen_netif_extra_info *xdp;
1041
1042                                 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1043                                 rx->offset = xdp->u.xdp.headroom;
1044                         }
1045                 }
1046                 cons = queue->rx.rsp_cons;
1047         }
1048
1049         for (;;) {
1050                 /*
1051                  * This definitely indicates a bug, either in this driver or in
1052                  * the backend driver. In the future this should flag the bad
1053                  * situation to the system controller to reboot the backend.
1054                  */
1055                 if (ref == INVALID_GRANT_REF) {
1056                         if (net_ratelimit())
1057                                 dev_warn(dev, "Bad rx response id %d.\n",
1058                                          rx->id);
1059                         err = -EINVAL;
1060                         goto next;
1061                 }
1062
1063                 if (unlikely(rx->status < 0 ||
1064                              rx->offset + rx->status > XEN_PAGE_SIZE)) {
1065                         if (net_ratelimit())
1066                                 dev_warn(dev, "rx->offset: %u, size: %d\n",
1067                                          rx->offset, rx->status);
1068                         xennet_move_rx_slot(queue, skb, ref);
1069                         err = -EINVAL;
1070                         goto next;
1071                 }
1072
1073                 if (!gnttab_end_foreign_access_ref(ref)) {
1074                         dev_alert(dev,
1075                                   "Grant still in use by backend domain\n");
1076                         queue->info->broken = true;
1077                         dev_alert(dev, "Disabled for further use\n");
1078                         return -EINVAL;
1079                 }
1080
1081                 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1082
1083                 rcu_read_lock();
1084                 xdp_prog = rcu_dereference(queue->xdp_prog);
1085                 if (xdp_prog) {
1086                         if (!(rx->flags & XEN_NETRXF_more_data)) {
1087                                 /* currently only a single page contains data */
1088                                 verdict = xennet_run_xdp(queue,
1089                                                          skb_frag_page(&skb_shinfo(skb)->frags[0]),
1090                                                          rx, xdp_prog, &xdp, need_xdp_flush);
1091                                 if (verdict != XDP_PASS)
1092                                         err = -EINVAL;
1093                         } else {
1094                                 /* drop the frame */
1095                                 err = -EINVAL;
1096                         }
1097                 }
1098                 rcu_read_unlock();
1099
1100                 __skb_queue_tail(list, skb);
1101
1102 next:
1103                 if (!(rx->flags & XEN_NETRXF_more_data))
1104                         break;
1105
1106                 if (cons + slots == rp) {
1107                         if (net_ratelimit())
1108                                 dev_warn(dev, "Need more slots\n");
1109                         err = -ENOENT;
1110                         break;
1111                 }
1112
1113                 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1114                 rx = &rx_local;
1115                 skb = xennet_get_rx_skb(queue, cons + slots);
1116                 ref = xennet_get_rx_ref(queue, cons + slots);
1117                 slots++;
1118         }
1119
1120         if (unlikely(slots > max)) {
1121                 if (net_ratelimit())
1122                         dev_warn(dev, "Too many slots\n");
1123                 err = -E2BIG;
1124         }
1125
1126         if (unlikely(err))
1127                 xennet_set_rx_rsp_cons(queue, cons + slots);
1128
1129         return err;
1130 }
1131
1132 static int xennet_set_skb_gso(struct sk_buff *skb,
1133                               struct xen_netif_extra_info *gso)
1134 {
1135         if (!gso->u.gso.size) {
1136                 if (net_ratelimit())
1137                         pr_warn("GSO size must not be zero\n");
1138                 return -EINVAL;
1139         }
1140
1141         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1142             gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1143                 if (net_ratelimit())
1144                         pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1145                 return -EINVAL;
1146         }
1147
1148         skb_shinfo(skb)->gso_size = gso->u.gso.size;
1149         skb_shinfo(skb)->gso_type =
1150                 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1151                 SKB_GSO_TCPV4 :
1152                 SKB_GSO_TCPV6;
1153
1154         /* Header must be checked, and gso_segs computed. */
1155         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1156         skb_shinfo(skb)->gso_segs = 0;
1157
1158         return 0;
1159 }
1160
1161 static int xennet_fill_frags(struct netfront_queue *queue,
1162                              struct sk_buff *skb,
1163                              struct sk_buff_head *list)
1164 {
1165         RING_IDX cons = queue->rx.rsp_cons;
1166         struct sk_buff *nskb;
1167
1168         while ((nskb = __skb_dequeue(list))) {
1169                 struct xen_netif_rx_response rx;
1170                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1171
1172                 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1173
1174                 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1175                         unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1176
1177                         BUG_ON(pull_to < skb_headlen(skb));
1178                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1179                 }
1180                 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1181                         xennet_set_rx_rsp_cons(queue,
1182                                                ++cons + skb_queue_len(list));
1183                         kfree_skb(nskb);
1184                         return -ENOENT;
1185                 }
1186
1187                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1188                                 skb_frag_page(nfrag),
1189                                 rx.offset, rx.status, PAGE_SIZE);
1190
1191                 skb_shinfo(nskb)->nr_frags = 0;
1192                 kfree_skb(nskb);
1193         }
1194
1195         xennet_set_rx_rsp_cons(queue, cons);
1196
1197         return 0;
1198 }
1199
1200 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1201 {
1202         bool recalculate_partial_csum = false;
1203
1204         /*
1205          * A GSO SKB must be CHECKSUM_PARTIAL. However, some buggy
1206          * peers can fail to set NETRXF_csum_blank when sending a GSO
1207          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1208          * recalculate the partial checksum.
1209          */
1210         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1211                 struct netfront_info *np = netdev_priv(dev);
1212                 atomic_inc(&np->rx_gso_checksum_fixup);
1213                 skb->ip_summed = CHECKSUM_PARTIAL;
1214                 recalculate_partial_csum = true;
1215         }
1216
1217         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1218         if (skb->ip_summed != CHECKSUM_PARTIAL)
1219                 return 0;
1220
1221         return skb_checksum_setup(skb, recalculate_partial_csum);
1222 }
1223
1224 static int handle_incoming_queue(struct netfront_queue *queue,
1225                                  struct sk_buff_head *rxq)
1226 {
1227         struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1228         int packets_dropped = 0;
1229         struct sk_buff *skb;
1230
1231         while ((skb = __skb_dequeue(rxq)) != NULL) {
1232                 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1233
1234                 if (pull_to > skb_headlen(skb))
1235                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1236
1237                 /* Ethernet work: delayed until here, as it peeks at the header. */
1238                 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1239                 skb_reset_network_header(skb);
1240
1241                 if (checksum_setup(queue->info->netdev, skb)) {
1242                         kfree_skb(skb);
1243                         packets_dropped++;
1244                         queue->info->netdev->stats.rx_errors++;
1245                         continue;
1246                 }
1247
1248                 u64_stats_update_begin(&rx_stats->syncp);
1249                 rx_stats->packets++;
1250                 rx_stats->bytes += skb->len;
1251                 u64_stats_update_end(&rx_stats->syncp);
1252
1253                 /* Pass it up. */
1254                 napi_gro_receive(&queue->napi, skb);
1255         }
1256
1257         return packets_dropped;
1258 }
1259
1260 static int xennet_poll(struct napi_struct *napi, int budget)
1261 {
1262         struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1263         struct net_device *dev = queue->info->netdev;
1264         struct sk_buff *skb;
1265         struct netfront_rx_info rinfo;
1266         struct xen_netif_rx_response *rx = &rinfo.rx;
1267         struct xen_netif_extra_info *extras = rinfo.extras;
1268         RING_IDX i, rp;
1269         int work_done;
1270         struct sk_buff_head rxq;
1271         struct sk_buff_head errq;
1272         struct sk_buff_head tmpq;
1273         int err;
1274         bool need_xdp_flush = false;
1275
1276         spin_lock(&queue->rx_lock);
1277
1278         skb_queue_head_init(&rxq);
1279         skb_queue_head_init(&errq);
1280         skb_queue_head_init(&tmpq);
1281
1282         rp = queue->rx.sring->rsp_prod;
1283         if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1284                 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1285                           rp - queue->rx.rsp_cons);
1286                 queue->info->broken = true;
1287                 spin_unlock(&queue->rx_lock);
1288                 return 0;
1289         }
1290         rmb(); /* Ensure we see queued responses up to 'rp'. */
1291
1292         i = queue->rx.rsp_cons;
1293         work_done = 0;
1294         while ((i != rp) && (work_done < budget)) {
1295                 RING_COPY_RESPONSE(&queue->rx, i, rx);
1296                 memset(extras, 0, sizeof(rinfo.extras));
1297
1298                 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1299                                            &need_xdp_flush);
1300
1301                 if (unlikely(err)) {
1302                         if (queue->info->broken) {
1303                                 spin_unlock(&queue->rx_lock);
1304                                 return 0;
1305                         }
1306 err:
1307                         while ((skb = __skb_dequeue(&tmpq)))
1308                                 __skb_queue_tail(&errq, skb);
1309                         dev->stats.rx_errors++;
1310                         i = queue->rx.rsp_cons;
1311                         continue;
1312                 }
1313
1314                 skb = __skb_dequeue(&tmpq);
1315
1316                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1317                         struct xen_netif_extra_info *gso;
1318                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1319
1320                         if (unlikely(xennet_set_skb_gso(skb, gso))) {
1321                                 __skb_queue_head(&tmpq, skb);
1322                                 xennet_set_rx_rsp_cons(queue,
1323                                                        queue->rx.rsp_cons +
1324                                                        skb_queue_len(&tmpq));
1325                                 goto err;
1326                         }
1327                 }
1328
1329                 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1330                 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1331                         NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1332
1333                 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1334                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1335                 skb->data_len = rx->status;
1336                 skb->len += rx->status;
1337
1338                 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1339                         goto err;
1340
1341                 if (rx->flags & XEN_NETRXF_csum_blank)
1342                         skb->ip_summed = CHECKSUM_PARTIAL;
1343                 else if (rx->flags & XEN_NETRXF_data_validated)
1344                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1345
1346                 __skb_queue_tail(&rxq, skb);
1347
1348                 i = queue->rx.rsp_cons + 1;
1349                 xennet_set_rx_rsp_cons(queue, i);
1350                 work_done++;
1351         }
1352         if (need_xdp_flush)
1353                 xdp_do_flush();
1354
1355         __skb_queue_purge(&errq);
1356
1357         work_done -= handle_incoming_queue(queue, &rxq);
1358
1359         xennet_alloc_rx_buffers(queue);
1360
1361         if (work_done < budget) {
1362                 int more_to_do = 0;
1363
1364                 napi_complete_done(napi, work_done);
1365
1366                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1367                 if (more_to_do)
1368                         napi_schedule(napi);
1369         }
1370
1371         spin_unlock(&queue->rx_lock);
1372
1373         return work_done;
1374 }
1375
1376 static int xennet_change_mtu(struct net_device *dev, int mtu)
1377 {
1378         int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1379
1380         if (mtu > max)
1381                 return -EINVAL;
1382         WRITE_ONCE(dev->mtu, mtu);
1383         return 0;
1384 }
1385
1386 static void xennet_get_stats64(struct net_device *dev,
1387                                struct rtnl_link_stats64 *tot)
1388 {
1389         struct netfront_info *np = netdev_priv(dev);
1390         int cpu;
1391
1392         for_each_possible_cpu(cpu) {
1393                 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1394                 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1395                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1396                 unsigned int start;
1397
1398                 do {
1399                         start = u64_stats_fetch_begin(&tx_stats->syncp);
1400                         tx_packets = tx_stats->packets;
1401                         tx_bytes = tx_stats->bytes;
1402                 } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
1403
1404                 do {
1405                         start = u64_stats_fetch_begin(&rx_stats->syncp);
1406                         rx_packets = rx_stats->packets;
1407                         rx_bytes = rx_stats->bytes;
1408                 } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
1409
1410                 tot->rx_packets += rx_packets;
1411                 tot->tx_packets += tx_packets;
1412                 tot->rx_bytes   += rx_bytes;
1413                 tot->tx_bytes   += tx_bytes;
1414         }
1415
1416         tot->rx_errors  = dev->stats.rx_errors;
1417         tot->tx_dropped = dev->stats.tx_dropped;
1418 }
1419
1420 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1421 {
1422         struct sk_buff *skb;
1423         int i;
1424
1425         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1426                 /* Skip over entries which are actually freelist references */
1427                 if (!queue->tx_skbs[i])
1428                         continue;
1429
1430                 skb = queue->tx_skbs[i];
1431                 queue->tx_skbs[i] = NULL;
1432                 get_page(queue->grant_tx_page[i]);
1433                 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1434                                           queue->grant_tx_page[i]);
1435                 queue->grant_tx_page[i] = NULL;
1436                 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
1437                 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1438                 dev_kfree_skb_irq(skb);
1439         }
1440 }
1441
1442 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1443 {
1444         int id, ref;
1445
1446         spin_lock_bh(&queue->rx_lock);
1447
1448         for (id = 0; id < NET_RX_RING_SIZE; id++) {
1449                 struct sk_buff *skb;
1450                 struct page *page;
1451
1452                 skb = queue->rx_skbs[id];
1453                 if (!skb)
1454                         continue;
1455
1456                 ref = queue->grant_rx_ref[id];
1457                 if (ref == INVALID_GRANT_REF)
1458                         continue;
1459
1460                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1461
1462                 /* gnttab_end_foreign_access() needs a page ref until
1463                  * foreign access is ended (which may be deferred).
1464                  */
1465                 get_page(page);
1466                 gnttab_end_foreign_access(ref, page);
1467                 queue->grant_rx_ref[id] = INVALID_GRANT_REF;
1468
1469                 kfree_skb(skb);
1470         }
1471
1472         spin_unlock_bh(&queue->rx_lock);
1473 }
1474
1475 static netdev_features_t xennet_fix_features(struct net_device *dev,
1476         netdev_features_t features)
1477 {
1478         struct netfront_info *np = netdev_priv(dev);
1479
1480         if (features & NETIF_F_SG &&
1481             !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1482                 features &= ~NETIF_F_SG;
1483
1484         if (features & NETIF_F_IPV6_CSUM &&
1485             !xenbus_read_unsigned(np->xbdev->otherend,
1486                                   "feature-ipv6-csum-offload", 0))
1487                 features &= ~NETIF_F_IPV6_CSUM;
1488
1489         if (features & NETIF_F_TSO &&
1490             !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1491                 features &= ~NETIF_F_TSO;
1492
1493         if (features & NETIF_F_TSO6 &&
1494             !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1495                 features &= ~NETIF_F_TSO6;
1496
1497         return features;
1498 }
1499
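     /*
      * Without scatter-gather a frame must fit into a single buffer, so
      * clamp the MTU to the standard Ethernet payload size when SG is
      * switched off.
      */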
1500 static int xennet_set_features(struct net_device *dev,
1501         netdev_features_t features)
1502 {
1503         if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1504                 netdev_info(dev, "Reducing MTU because no SG offload\n");
1505                 dev->mtu = ETH_DATA_LEN;
1506         }
1507
1508         return 0;
1509 }
1510
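     /*
      * Tx event handling shared by the interrupt handlers: garbage-collect
      * completed tx slots and clear the spurious-EOI hint if there was
      * work to do.  Returns false if the device has been marked broken.
      */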
1511 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1512 {
1513         unsigned long flags;
1514
1515         if (unlikely(queue->info->broken))
1516                 return false;
1517
1518         spin_lock_irqsave(&queue->tx_lock, flags);
1519         if (xennet_tx_buf_gc(queue))
1520                 *eoi = 0;
1521         spin_unlock_irqrestore(&queue->tx_lock, flags);
1522
1523         return true;
1524 }
1525
1526 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1527 {
1528         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1529
1530         if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1531                 xen_irq_lateeoi(irq, eoiflag);
1532
1533         return IRQ_HANDLED;
1534 }
1535
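     /*
      * Rx event handling shared by the interrupt handlers: note how many
      * responses are pending and kick NAPI.  A producer index moving
      * backwards can only come from a misbehaving backend, so the device
      * is marked broken in that case.
      */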
1536 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1537 {
1538         unsigned int work_queued;
1539         unsigned long flags;
1540
1541         if (unlikely(queue->info->broken))
1542                 return false;
1543
1544         spin_lock_irqsave(&queue->rx_cons_lock, flags);
1545         work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
1546         if (work_queued > queue->rx_rsp_unconsumed) {
1547                 queue->rx_rsp_unconsumed = work_queued;
1548                 *eoi = 0;
1549         } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1550                 const struct device *dev = &queue->info->netdev->dev;
1551
1552                 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1553                 dev_alert(dev, "RX producer index going backwards\n");
1554                 dev_alert(dev, "Disabled for further use\n");
1555                 queue->info->broken = true;
1556                 return false;
1557         }
1558         spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1559
1560         if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1561                 napi_schedule(&queue->napi);
1562
1563         return true;
1564 }
1565
1566 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1567 {
1568         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1569
1570         if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1571                 xen_irq_lateeoi(irq, eoiflag);
1572
1573         return IRQ_HANDLED;
1574 }
1575
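     /* Handler used when tx and rx share a single event channel. */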
1576 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1577 {
1578         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1579
1580         if (xennet_handle_tx(dev_id, &eoiflag) &&
1581             xennet_handle_rx(dev_id, &eoiflag))
1582                 xen_irq_lateeoi(irq, eoiflag);
1583
1584         return IRQ_HANDLED;
1585 }
1586
1587 #ifdef CONFIG_NET_POLL_CONTROLLER
1588 static void xennet_poll_controller(struct net_device *dev)
1589 {
1590         /* Poll each queue */
1591         struct netfront_info *info = netdev_priv(dev);
1592         unsigned int num_queues = dev->real_num_tx_queues;
1593         unsigned int i;
1594
1595         if (info->broken)
1596                 return;
1597
1598         for (i = 0; i < num_queues; ++i)
1599                 xennet_interrupt(0, &info->queues[i]);
1600 }
1601 #endif
1602
1603 #define NETBACK_XDP_HEADROOM_DISABLE    0
1604 #define NETBACK_XDP_HEADROOM_ENABLE     1
1605
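     /*
      * Tell the backend, via XenStore, how much per-packet headroom to
      * reserve: XDP_PACKET_HEADROOM while an XDP program is attached,
      * none otherwise.
      */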
1606 static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1607 {
1608         int err;
1609         unsigned short headroom;
1610
1611         headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1612         err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1613                             "xdp-headroom", "%hu",
1614                             headroom);
1615         if (err)
1616                 pr_warn("Error writing xdp-headroom\n");
1617
1618         return err;
1619 }
1620
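     /*
      * Install or remove an XDP program.  The backend must reconfigure
      * its rings for the new headroom, so cycle through the
      * Reconfiguring/Reconfigured xenbus states before swapping the
      * per-queue program pointers.
      */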
1621 static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1622                           struct netlink_ext_ack *extack)
1623 {
1624         unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1625         struct netfront_info *np = netdev_priv(dev);
1626         struct bpf_prog *old_prog;
1627         unsigned int i;
             int err;
1628
1629         if (dev->mtu > max_mtu) {
1630                 netdev_warn(dev, "XDP requires an MTU of at most %lu\n", max_mtu);
1631                 return -EINVAL;
1632         }
1633
1634         if (!np->netback_has_xdp_headroom)
1635                 return 0;
1636
1637         xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1638
1639         err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1640                                   NETBACK_XDP_HEADROOM_DISABLE);
1641         if (err)
1642                 return err;
1643
1644         /* avoid the race with XDP headroom adjustment */
1645         wait_event(module_wq,
1646                    xenbus_read_driver_state(np->xbdev->otherend) ==
1647                    XenbusStateReconfigured);
1648         np->netfront_xdp_enabled = true;
1649
1650         old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1651
1652         if (prog)
1653                 bpf_prog_add(prog, dev->real_num_tx_queues);
1654
1655         for (i = 0; i < dev->real_num_tx_queues; ++i)
1656                 rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1657
1658         if (old_prog)
1659                 for (i = 0; i < dev->real_num_tx_queues; ++i)
1660                         bpf_prog_put(old_prog);
1661
1662         xenbus_switch_state(np->xbdev, XenbusStateConnected);
1663
1664         return 0;
1665 }
1666
1667 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1668 {
1669         struct netfront_info *np = netdev_priv(dev);
1670
1671         if (np->broken)
1672                 return -ENODEV;
1673
1674         switch (xdp->command) {
1675         case XDP_SETUP_PROG:
1676                 return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1677         default:
1678                 return -EINVAL;
1679         }
1680 }
1681
1682 static const struct net_device_ops xennet_netdev_ops = {
1683         .ndo_uninit          = xennet_uninit,
1684         .ndo_open            = xennet_open,
1685         .ndo_stop            = xennet_close,
1686         .ndo_start_xmit      = xennet_start_xmit,
1687         .ndo_change_mtu      = xennet_change_mtu,
1688         .ndo_get_stats64     = xennet_get_stats64,
1689         .ndo_set_mac_address = eth_mac_addr,
1690         .ndo_validate_addr   = eth_validate_addr,
1691         .ndo_fix_features    = xennet_fix_features,
1692         .ndo_set_features    = xennet_set_features,
1693         .ndo_select_queue    = xennet_select_queue,
1694         .ndo_bpf             = xennet_xdp,
1695         .ndo_xdp_xmit        = xennet_xdp_xmit,
1696 #ifdef CONFIG_NET_POLL_CONTROLLER
1697         .ndo_poll_controller = xennet_poll_controller,
1698 #endif
1699 };
1700
1701 static void xennet_free_netdev(struct net_device *netdev)
1702 {
1703         struct netfront_info *np = netdev_priv(netdev);
1704
1705         free_percpu(np->rx_stats);
1706         free_percpu(np->tx_stats);
1707         free_netdev(netdev);
1708 }
1709
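     /*
      * Allocate the net_device and its per-cpu stats, advertise an
      * optimistic feature set (trimmed during negotiation in
      * xennet_connect()), and wait until the backend has left the
      * Closed/Unknown states.
      */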
1710 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1711 {
1712         int err;
1713         struct net_device *netdev;
1714         struct netfront_info *np;
1715
1716         netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1717         if (!netdev)
1718                 return ERR_PTR(-ENOMEM);
1719
1720         np                   = netdev_priv(netdev);
1721         np->xbdev            = dev;
1722
1723         np->queues = NULL;
1724
1725         err = -ENOMEM;
1726         np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1727         if (np->rx_stats == NULL)
1728                 goto exit;
1729         np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1730         if (np->tx_stats == NULL)
1731                 goto exit;
1732
1733         netdev->netdev_ops      = &xennet_netdev_ops;
1734
1735         netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1736                                   NETIF_F_GSO_ROBUST;
1737         netdev->hw_features     = NETIF_F_SG |
1738                                   NETIF_F_IPV6_CSUM |
1739                                   NETIF_F_TSO | NETIF_F_TSO6;
1740
1741         /*
1742          * Assume that all hw features are available for now. This set
1743          * will be adjusted by the call to netdev_update_features() in
1744          * xennet_connect() which is the earliest point where we can
1745          * negotiate with the backend regarding supported features.
1746          */
1747         netdev->features |= netdev->hw_features;
1748         netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
1749                                NETDEV_XDP_ACT_NDO_XMIT;
1750
1751         netdev->ethtool_ops = &xennet_ethtool_ops;
1752         netdev->min_mtu = ETH_MIN_MTU;
1753         netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1754         SET_NETDEV_DEV(netdev, &dev->dev);
1755
1756         np->netdev = netdev;
1757         np->netfront_xdp_enabled = false;
1758
1759         netif_carrier_off(netdev);
1760
1761         do {
1762                 xenbus_switch_state(dev, XenbusStateInitialising);
1763                 err = wait_event_timeout(module_wq,
1764                                  xenbus_read_driver_state(dev->otherend) !=
1765                                  XenbusStateClosed &&
1766                                  xenbus_read_driver_state(dev->otherend) !=
1767                                  XenbusStateUnknown, XENNET_TIMEOUT);
1768         } while (!err);
1769
1770         return netdev;
1771
1772  exit:
1773         xennet_free_netdev(netdev);
1774         return ERR_PTR(err);
1775 }
1776
1777 /*
1778  * Entry point to this code when a new device is created.  Allocate the basic
1779  * structures and the ring buffers for communication with the backend, and
1780  * inform the backend of the appropriate details for those.
1781  */
1782 static int netfront_probe(struct xenbus_device *dev,
1783                           const struct xenbus_device_id *id)
1784 {
1785         int err;
1786         struct net_device *netdev;
1787         struct netfront_info *info;
1788
1789         netdev = xennet_create_dev(dev);
1790         if (IS_ERR(netdev)) {
1791                 err = PTR_ERR(netdev);
1792                 xenbus_dev_fatal(dev, err, "creating netdev");
1793                 return err;
1794         }
1795
1796         info = netdev_priv(netdev);
1797         dev_set_drvdata(&dev->dev, info);
1798 #ifdef CONFIG_SYSFS
1799         info->netdev->sysfs_groups[0] = &xennet_dev_group;
1800 #endif
1801
1802         return 0;
1803 }
1804
1805 static void xennet_end_access(int ref, void *page)
1806 {
1807         /* This frees the page as a side-effect */
1808         if (ref != INVALID_GRANT_REF)
1809                 gnttab_end_foreign_access(ref, virt_to_page(page));
1810 }
1811
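     /*
      * Undo everything set up for the current backend connection: unbind
      * irqs and event channels, quiesce NAPI, release all tx/rx buffers
      * and grant references, and tear down the shared rings and page
      * pools.
      */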
1812 static void xennet_disconnect_backend(struct netfront_info *info)
1813 {
1814         unsigned int i = 0;
1815         unsigned int num_queues = info->netdev->real_num_tx_queues;
1816
1817         netif_carrier_off(info->netdev);
1818
1819         for (i = 0; i < num_queues && info->queues; ++i) {
1820                 struct netfront_queue *queue = &info->queues[i];
1821
1822                 del_timer_sync(&queue->rx_refill_timer);
1823
1824                 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1825                         unbind_from_irqhandler(queue->tx_irq, queue);
1826                 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1827                         unbind_from_irqhandler(queue->tx_irq, queue);
1828                         unbind_from_irqhandler(queue->rx_irq, queue);
1829                 }
1830                 queue->tx_evtchn = queue->rx_evtchn = 0;
1831                 queue->tx_irq = queue->rx_irq = 0;
1832
1833                 if (netif_running(info->netdev))
1834                         napi_synchronize(&queue->napi);
1835
1836                 xennet_release_tx_bufs(queue);
1837                 xennet_release_rx_bufs(queue);
1838                 gnttab_free_grant_references(queue->gref_tx_head);
1839                 gnttab_free_grant_references(queue->gref_rx_head);
1840
1841                 /* End access and free the pages */
1842                 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1843                 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1844
1845                 queue->tx_ring_ref = INVALID_GRANT_REF;
1846                 queue->rx_ring_ref = INVALID_GRANT_REF;
1847                 queue->tx.sring = NULL;
1848                 queue->rx.sring = NULL;
1849
1850                 page_pool_destroy(queue->page_pool);
1851         }
1852 }
1853
1854 /*
1855  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1856  * driver restart.  We tear down our netif structure and recreate it, but
1857  * leave the device-layer structures intact so that this is transparent to the
1858  * rest of the kernel.
1859  */
1860 static int netfront_resume(struct xenbus_device *dev)
1861 {
1862         struct netfront_info *info = dev_get_drvdata(&dev->dev);
1863
1864         dev_dbg(&dev->dev, "%s\n", dev->nodename);
1865
1866         netif_tx_lock_bh(info->netdev);
1867         netif_device_detach(info->netdev);
1868         netif_tx_unlock_bh(info->netdev);
1869
1870         xennet_disconnect_backend(info);
1871
1872         rtnl_lock();
1873         if (info->queues)
1874                 xennet_destroy_queues(info);
1875         rtnl_unlock();
1876
1877         return 0;
1878 }
1879
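     /* Parse the "mac" node ("xx:xx:xx:xx:xx:xx") of this device. */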
1880 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1881 {
1882         char *s, *e, *macstr;
1883         int i;
1884
1885         macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1886         if (IS_ERR(macstr))
1887                 return PTR_ERR(macstr);
1888
1889         for (i = 0; i < ETH_ALEN; i++) {
1890                 mac[i] = simple_strtoul(s, &e, 16);
1891                 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1892                         kfree(macstr);
1893                         return -ENOENT;
1894                 }
1895                 s = e+1;
1896         }
1897
1898         kfree(macstr);
1899         return 0;
1900 }
1901
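     /* Bind a single event channel shared by tx and rx. */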
1902 static int setup_netfront_single(struct netfront_queue *queue)
1903 {
1904         int err;
1905
1906         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1907         if (err < 0)
1908                 goto fail;
1909
1910         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1911                                                 xennet_interrupt, 0,
1912                                                 queue->info->netdev->name,
1913                                                 queue);
1914         if (err < 0)
1915                 goto bind_fail;
1916         queue->rx_evtchn = queue->tx_evtchn;
1917         queue->rx_irq = queue->tx_irq = err;
1918
1919         return 0;
1920
1921 bind_fail:
1922         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1923         queue->tx_evtchn = 0;
1924 fail:
1925         return err;
1926 }
1927
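     /*
      * Bind separate tx and rx event channels; the error labels unwind
      * the allocations in reverse order.
      */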
1928 static int setup_netfront_split(struct netfront_queue *queue)
1929 {
1930         int err;
1931
1932         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1933         if (err < 0)
1934                 goto fail;
1935         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1936         if (err < 0)
1937                 goto alloc_rx_evtchn_fail;
1938
1939         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1940                  "%s-tx", queue->name);
1941         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1942                                                 xennet_tx_interrupt, 0,
1943                                                 queue->tx_irq_name, queue);
1944         if (err < 0)
1945                 goto bind_tx_fail;
1946         queue->tx_irq = err;
1947
1948         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1949                  "%s-rx", queue->name);
1950         err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1951                                                 xennet_rx_interrupt, 0,
1952                                                 queue->rx_irq_name, queue);
1953         if (err < 0)
1954                 goto bind_rx_fail;
1955         queue->rx_irq = err;
1956
1957         return 0;
1958
1959 bind_rx_fail:
1960         unbind_from_irqhandler(queue->tx_irq, queue);
1961         queue->tx_irq = 0;
1962 bind_tx_fail:
1963         xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1964         queue->rx_evtchn = 0;
1965 alloc_rx_evtchn_fail:
1966         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1967         queue->tx_evtchn = 0;
1968 fail:
1969         return err;
1970 }
1971
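     /*
      * Allocate the granted shared rings and bind the event channel(s)
      * for one queue, preferring split tx/rx channels when the backend
      * offers them.
      */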
1972 static int setup_netfront(struct xenbus_device *dev,
1973                         struct netfront_queue *queue, unsigned int feature_split_evtchn)
1974 {
1975         struct xen_netif_tx_sring *txs;
1976         struct xen_netif_rx_sring *rxs;
1977         int err;
1978
1979         queue->tx_ring_ref = INVALID_GRANT_REF;
1980         queue->rx_ring_ref = INVALID_GRANT_REF;
1981         queue->rx.sring = NULL;
1982         queue->tx.sring = NULL;
1983
1984         err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
1985                                 1, &queue->tx_ring_ref);
1986         if (err)
1987                 goto fail;
1988
1989         XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1990
1991         err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
1992                                 1, &queue->rx_ring_ref);
1993         if (err)
1994                 goto fail;
1995
1996         XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1997
1998         if (feature_split_evtchn)
1999                 err = setup_netfront_split(queue);
2000         /* Fall back to a single event channel if
2001          *  a) feature-split-event-channels == 0, or
2002          *  b) it is 1 but the split setup failed
2003          */
2004         if (!feature_split_evtchn || err)
2005                 err = setup_netfront_single(queue);
2006
2007         if (err)
2008                 goto fail;
2009
2010         return 0;
2011
2012  fail:
2013         xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
2014         xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
2015
2016         return err;
2017 }
2018
2019 /* Queue-specific initialisation
2020  * This used to be done in xennet_create_dev() but must now
2021  * be run per-queue.
2022  */
2023 static int xennet_init_queue(struct netfront_queue *queue)
2024 {
2025         unsigned short i;
2026         int err = 0;
2027         char *devid;
2028
2029         spin_lock_init(&queue->tx_lock);
2030         spin_lock_init(&queue->rx_lock);
2031         spin_lock_init(&queue->rx_cons_lock);
2032
2033         timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2034
2035         devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2036         snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2037                  devid, queue->id);
2038
2039         /* Initialise tx_skb_freelist as a free chain containing every entry. */
2040         queue->tx_skb_freelist = 0;
2041         queue->tx_pend_queue = TX_LINK_NONE;
2042         for (i = 0; i < NET_TX_RING_SIZE; i++) {
2043                 queue->tx_link[i] = i + 1;
2044                 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
2045                 queue->grant_tx_page[i] = NULL;
2046         }
2047         queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2048
2049         /* Clear out rx_skbs */
2050         for (i = 0; i < NET_RX_RING_SIZE; i++) {
2051                 queue->rx_skbs[i] = NULL;
2052                 queue->grant_rx_ref[i] = INVALID_GRANT_REF;
2053         }
2054
2055         /* A grant for every tx ring slot */
2056         if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2057                                           &queue->gref_tx_head) < 0) {
2058                 pr_alert("can't alloc tx grant refs\n");
2059                 err = -ENOMEM;
2060                 goto exit;
2061         }
2062
2063         /* A grant for every rx ring slot */
2064         if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2065                                           &queue->gref_rx_head) < 0) {
2066                 pr_alert("can't alloc rx grant refs\n");
2067                 err = -ENOMEM;
2068                 goto exit_free_tx;
2069         }
2070
2071         return 0;
2072
2073  exit_free_tx:
2074         gnttab_free_grant_references(queue->gref_tx_head);
2075  exit:
2076         return err;
2077 }
2078
2079 static int write_queue_xenstore_keys(struct netfront_queue *queue,
2080                            struct xenbus_transaction *xbt, int write_hierarchical)
2081 {
2082         /* Write the queue-specific keys into XenStore in the traditional
2083          * way for a single queue, or under a per-queue subkey when there
2084          * are multiple queues.
2085          */
2086         struct xenbus_device *dev = queue->info->xbdev;
2087         int err;
2088         const char *message;
2089         char *path;
2090         size_t pathsize;
2091
2092         /* Choose the correct place to write the keys */
2093         if (write_hierarchical) {
2094                 pathsize = strlen(dev->nodename) + 10;
2095                 path = kzalloc(pathsize, GFP_KERNEL);
2096                 if (!path) {
2097                         err = -ENOMEM;
2098                         message = "out of memory while writing ring references";
2099                         goto error;
2100                 }
2101                 snprintf(path, pathsize, "%s/queue-%u",
2102                                 dev->nodename, queue->id);
2103         } else {
2104                 path = (char *)dev->nodename;
2105         }
2106
2107         /* Write ring references */
2108         err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2109                         queue->tx_ring_ref);
2110         if (err) {
2111                 message = "writing tx-ring-ref";
2112                 goto error;
2113         }
2114
2115         err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2116                         queue->rx_ring_ref);
2117         if (err) {
2118                 message = "writing rx-ring-ref";
2119                 goto error;
2120         }
2121
2122         /* Write event channels; taking into account both shared
2123          * and split event channel scenarios.
2124          */
2125         if (queue->tx_evtchn == queue->rx_evtchn) {
2126                 /* Shared event channel */
2127                 err = xenbus_printf(*xbt, path,
2128                                 "event-channel", "%u", queue->tx_evtchn);
2129                 if (err) {
2130                         message = "writing event-channel";
2131                         goto error;
2132                 }
2133         } else {
2134                 /* Split event channels */
2135                 err = xenbus_printf(*xbt, path,
2136                                 "event-channel-tx", "%u", queue->tx_evtchn);
2137                 if (err) {
2138                         message = "writing event-channel-tx";
2139                         goto error;
2140                 }
2141
2142                 err = xenbus_printf(*xbt, path,
2143                                 "event-channel-rx", "%u", queue->rx_evtchn);
2144                 if (err) {
2145                         message = "writing event-channel-rx";
2146                         goto error;
2147                 }
2148         }
2149
2150         if (write_hierarchical)
2151                 kfree(path);
2152         return 0;
2153
2154 error:
2155         if (write_hierarchical)
2156                 kfree(path);
2157         xenbus_dev_fatal(dev, err, "%s", message);
2158         return err;
2159 }
2162
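     /*
      * Create the rx page pool for a queue, reserving XDP_PACKET_HEADROOM
      * in each page, and register it as the memory model of the queue's
      * xdp_rxq so rx buffers are recycled through the pool.
      */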
2163 static int xennet_create_page_pool(struct netfront_queue *queue)
2164 {
2165         int err;
2166         struct page_pool_params pp_params = {
2167                 .order = 0,
2168                 .flags = 0,
2169                 .pool_size = NET_RX_RING_SIZE,
2170                 .nid = NUMA_NO_NODE,
2171                 .dev = &queue->info->netdev->dev,
2172                 .offset = XDP_PACKET_HEADROOM,
2173                 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2174         };
2175
2176         queue->page_pool = page_pool_create(&pp_params);
2177         if (IS_ERR(queue->page_pool)) {
2178                 err = PTR_ERR(queue->page_pool);
2179                 queue->page_pool = NULL;
2180                 return err;
2181         }
2182
2183         err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2184                                queue->id, 0);
2185         if (err) {
2186                 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2187                 goto err_free_pp;
2188         }
2189
2190         err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2191                                          MEM_TYPE_PAGE_POOL, queue->page_pool);
2192         if (err) {
2193                 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2194                 goto err_unregister_rxq;
2195         }
2196         return 0;
2197
2198 err_unregister_rxq:
2199         xdp_rxq_info_unreg(&queue->xdp_rxq);
2200 err_free_pp:
2201         page_pool_destroy(queue->page_pool);
2202         queue->page_pool = NULL;
2203         return err;
2204 }
2205
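     /*
      * Allocate and initialise up to *num_queues queues; called with the
      * rtnl lock held.  On partial failure *num_queues is reduced to the
      * number actually created.
      */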
2206 static int xennet_create_queues(struct netfront_info *info,
2207                                 unsigned int *num_queues)
2208 {
2209         unsigned int i;
2210         int ret;
2211
2212         info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2213                                GFP_KERNEL);
2214         if (!info->queues)
2215                 return -ENOMEM;
2216
2217         for (i = 0; i < *num_queues; i++) {
2218                 struct netfront_queue *queue = &info->queues[i];
2219
2220                 queue->id = i;
2221                 queue->info = info;
2222
2223                 ret = xennet_init_queue(queue);
2224                 if (ret < 0) {
2225                         dev_warn(&info->xbdev->dev,
2226                                  "only created %u queues\n", i);
2227                         *num_queues = i;
2228                         break;
2229                 }
2230
2231                 /* use page pool recycling instead of buddy allocator */
2232                 ret = xennet_create_page_pool(queue);
2233                 if (ret < 0) {
2234                         dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2235                         *num_queues = i;
2236                         return ret;
2237                 }
2238
2239                 netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
2240                 if (netif_running(info->netdev))
2241                         napi_enable(&queue->napi);
2242         }
2243
2244         netif_set_real_num_tx_queues(info->netdev, *num_queues);
2245
2246         if (*num_queues == 0) {
2247                 dev_err(&info->xbdev->dev, "no queues\n");
2248                 return -EINVAL;
2249         }
2250         return 0;
2251 }
2252
2253 /* Common code used when first setting up, and when resuming. */
2254 static int talk_to_netback(struct xenbus_device *dev,
2255                            struct netfront_info *info)
2256 {
2257         const char *message;
2258         struct xenbus_transaction xbt;
2259         int err;
2260         unsigned int feature_split_evtchn;
2261         unsigned int i = 0;
2262         unsigned int max_queues = 0;
2263         struct netfront_queue *queue = NULL;
2264         unsigned int num_queues = 1;
2265         u8 addr[ETH_ALEN];
2266
2267         info->netdev->irq = 0;
2268
2269         /* Check if backend is trusted. */
2270         info->bounce = !xennet_trusted ||
2271                        !xenbus_read_unsigned(dev->nodename, "trusted", 1);
2272
2273         /* Check if backend supports multiple queues */
2274         max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2275                                           "multi-queue-max-queues", 1);
2276         num_queues = min(max_queues, xennet_max_queues);
2277
2278         /* Check feature-split-event-channels */
2279         feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2280                                         "feature-split-event-channels", 0);
2281
2282         /* Read mac addr. */
2283         err = xen_net_read_mac(dev, addr);
2284         if (err) {
2285                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2286                 goto out_unlocked;
2287         }
2288         eth_hw_addr_set(info->netdev, addr);
2289
2290         info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2291                                                               "feature-xdp-headroom", 0);
2292         if (info->netback_has_xdp_headroom) {
2293                 /* set the current xen-netfront xdp state */
2294                 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2295                                           NETBACK_XDP_HEADROOM_ENABLE :
2296                                           NETBACK_XDP_HEADROOM_DISABLE);
2297                 if (err)
2298                         goto out_unlocked;
2299         }
2300
2301         rtnl_lock();
2302         if (info->queues)
2303                 xennet_destroy_queues(info);
2304
2305         /* For the case of a reconnect reset the "broken" indicator. */
2306         info->broken = false;
2307
2308         err = xennet_create_queues(info, &num_queues);
2309         if (err < 0) {
2310                 xenbus_dev_fatal(dev, err, "creating queues");
2311                 kfree(info->queues);
2312                 info->queues = NULL;
2313                 goto out;
2314         }
2315         rtnl_unlock();
2316
2317         /* Create shared ring, alloc event channel -- for each queue */
2318         for (i = 0; i < num_queues; ++i) {
2319                 queue = &info->queues[i];
2320                 err = setup_netfront(dev, queue, feature_split_evtchn);
2321                 if (err)
2322                         goto destroy_ring;
2323         }
2324
2325 again:
2326         err = xenbus_transaction_start(&xbt);
2327         if (err) {
2328                 xenbus_dev_fatal(dev, err, "starting transaction");
2329                 goto destroy_ring;
2330         }
2331
2332         if (xenbus_exists(XBT_NIL,
2333                           info->xbdev->otherend, "multi-queue-max-queues")) {
2334                 /* Write the number of queues */
2335                 err = xenbus_printf(xbt, dev->nodename,
2336                                     "multi-queue-num-queues", "%u", num_queues);
2337                 if (err) {
2338                         message = "writing multi-queue-num-queues";
2339                         goto abort_transaction_no_dev_fatal;
2340                 }
2341         }
2342
2343         if (num_queues == 1) {
2344                 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2345                 if (err)
2346                         goto abort_transaction_no_dev_fatal;
2347         } else {
2348                 /* Write the keys for each queue */
2349                 for (i = 0; i < num_queues; ++i) {
2350                         queue = &info->queues[i];
2351                         err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2352                         if (err)
2353                                 goto abort_transaction_no_dev_fatal;
2354                 }
2355         }
2356
2357         /* The remaining keys are not queue-specific */
2358         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2359                             1);
2360         if (err) {
2361                 message = "writing request-rx-copy";
2362                 goto abort_transaction;
2363         }
2364
2365         err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2366         if (err) {
2367                 message = "writing feature-rx-notify";
2368                 goto abort_transaction;
2369         }
2370
2371         err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2372         if (err) {
2373                 message = "writing feature-sg";
2374                 goto abort_transaction;
2375         }
2376
2377         err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2378         if (err) {
2379                 message = "writing feature-gso-tcpv4";
2380                 goto abort_transaction;
2381         }
2382
2383         err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2384         if (err) {
2385                 message = "writing feature-gso-tcpv6";
2386                 goto abort_transaction;
2387         }
2388
2389         err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2390                            "1");
2391         if (err) {
2392                 message = "writing feature-ipv6-csum-offload";
2393                 goto abort_transaction;
2394         }
2395
2396         err = xenbus_transaction_end(xbt, 0);
2397         if (err) {
2398                 if (err == -EAGAIN)
2399                         goto again;
2400                 xenbus_dev_fatal(dev, err, "completing transaction");
2401                 goto destroy_ring;
2402         }
2403
2404         return 0;
2405
2406  abort_transaction:
2407         xenbus_dev_fatal(dev, err, "%s", message);
2408 abort_transaction_no_dev_fatal:
2409         xenbus_transaction_end(xbt, 1);
2410  destroy_ring:
2411         xennet_disconnect_backend(info);
2412         rtnl_lock();
2413         xennet_destroy_queues(info);
2414  out:
2415         rtnl_unlock();
2416 out_unlocked:
2417         device_unregister(&dev->dev);
2418         return err;
2419 }
2420
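     /*
      * Bring the device up against a (re)connected backend: renegotiate
      * features, register the netdev on first connect, then kick every
      * queue in case packets were requeued while disconnected.
      */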
2421 static int xennet_connect(struct net_device *dev)
2422 {
2423         struct netfront_info *np = netdev_priv(dev);
2424         unsigned int num_queues = 0;
2425         int err;
2426         unsigned int j = 0;
2427         struct netfront_queue *queue = NULL;
2428
2429         if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2430                 dev_info(&dev->dev,
2431                          "backend does not support copying receive path\n");
2432                 return -ENODEV;
2433         }
2434
2435         err = talk_to_netback(np->xbdev, np);
2436         if (err)
2437                 return err;
2438         if (np->netback_has_xdp_headroom)
2439                 pr_info("backend supports XDP headroom\n");
2440         if (np->bounce)
2441                 dev_info(&np->xbdev->dev,
2442                          "bouncing transmitted data to zeroed pages\n");
2443
2444         /* talk_to_netback() sets the correct number of queues */
2445         num_queues = dev->real_num_tx_queues;
2446
2447         if (dev->reg_state == NETREG_UNINITIALIZED) {
2448                 err = register_netdev(dev);
2449                 if (err) {
2450                         pr_warn("%s: register_netdev err=%d\n", __func__, err);
2451                         device_unregister(&np->xbdev->dev);
2452                         return err;
2453                 }
2454         }
2455
2456         rtnl_lock();
2457         netdev_update_features(dev);
2458         rtnl_unlock();
2459
2460         /*
2461          * All public and private state should now be sane.  Get
2462          * ready to start sending and receiving packets and give the driver
2463          * domain a kick because we've probably just requeued some
2464          * packets.
2465          */
2466         netif_tx_lock_bh(np->netdev);
2467         netif_device_attach(np->netdev);
2468         netif_tx_unlock_bh(np->netdev);
2469
2470         netif_carrier_on(np->netdev);
2471         for (j = 0; j < num_queues; ++j) {
2472                 queue = &np->queues[j];
2473
2474                 notify_remote_via_irq(queue->tx_irq);
2475                 if (queue->tx_irq != queue->rx_irq)
2476                         notify_remote_via_irq(queue->rx_irq);
2477
2478                 spin_lock_bh(&queue->rx_lock);
2479                 xennet_alloc_rx_buffers(queue);
2480                 spin_unlock_bh(&queue->rx_lock);
2481         }
2482
2483         return 0;
2484 }
2485
2486 /*
2487  * Callback received when the backend's state changes.
2488  */
2489 static void netback_changed(struct xenbus_device *dev,
2490                             enum xenbus_state backend_state)
2491 {
2492         struct netfront_info *np = dev_get_drvdata(&dev->dev);
2493         struct net_device *netdev = np->netdev;
2494
2495         dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2496
2497         wake_up_all(&module_wq);
2498
2499         switch (backend_state) {
2500         case XenbusStateInitialising:
2501         case XenbusStateInitialised:
2502         case XenbusStateReconfiguring:
2503         case XenbusStateReconfigured:
2504         case XenbusStateUnknown:
2505                 break;
2506
2507         case XenbusStateInitWait:
2508                 if (dev->state != XenbusStateInitialising)
2509                         break;
2510                 if (xennet_connect(netdev) != 0)
2511                         break;
2512                 xenbus_switch_state(dev, XenbusStateConnected);
2513                 break;
2514
2515         case XenbusStateConnected:
2516                 netdev_notify_peers(netdev);
2517                 break;
2518
2519         case XenbusStateClosed:
2520                 if (dev->state == XenbusStateClosed)
2521                         break;
2522                 fallthrough;    /* Missed the backend's CLOSING state */
2523         case XenbusStateClosing:
2524                 xenbus_frontend_closed(dev);
2525                 break;
2526         }
2527 }
2528
2529 static const struct xennet_stat {
2530         char name[ETH_GSTRING_LEN];
2531         u16 offset;
2532 } xennet_stats[] = {
2533         {
2534                 "rx_gso_checksum_fixup",
2535                 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2536         },
2537 };
2538
2539 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2540 {
2541         switch (string_set) {
2542         case ETH_SS_STATS:
2543                 return ARRAY_SIZE(xennet_stats);
2544         default:
2545                 return -EINVAL;
2546         }
2547 }
2548
2549 static void xennet_get_ethtool_stats(struct net_device *dev,
2550                                      struct ethtool_stats *stats, u64 *data)
2551 {
2552         void *np = netdev_priv(dev);
2553         int i;
2554
2555         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2556                 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2557 }
2558
2559 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2560 {
2561         int i;
2562
2563         switch (stringset) {
2564         case ETH_SS_STATS:
2565                 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2566                         memcpy(data + i * ETH_GSTRING_LEN,
2567                                xennet_stats[i].name, ETH_GSTRING_LEN);
2568                 break;
2569         }
2570 }
2571
2572 static const struct ethtool_ops xennet_ethtool_ops = {
2574         .get_link = ethtool_op_get_link,
2575
2576         .get_sset_count = xennet_get_sset_count,
2577         .get_ethtool_stats = xennet_get_ethtool_stats,
2578         .get_strings = xennet_get_strings,
2579         .get_ts_info = ethtool_op_get_ts_info,
2580 };
2581
2582 #ifdef CONFIG_SYSFS
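     /*
      * Legacy rxbuf_{min,max,cur} attributes: the rx ring size is fixed
      * nowadays, so reads report NET_RX_RING_SIZE and writes are only
      * validated, never applied.
      */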
2583 static ssize_t show_rxbuf(struct device *dev,
2584                           struct device_attribute *attr, char *buf)
2585 {
2586         return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2587 }
2588
2589 static ssize_t store_rxbuf(struct device *dev,
2590                            struct device_attribute *attr,
2591                            const char *buf, size_t len)
2592 {
2593         char *endp;
2594
2595         if (!capable(CAP_NET_ADMIN))
2596                 return -EPERM;
2597
2598         simple_strtoul(buf, &endp, 0);
2599         if (endp == buf)
2600                 return -EBADMSG;
2601
2602         /* rxbuf_min and rxbuf_max are no longer configurable. */
2603
2604         return len;
2605 }
2606
2607 static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2608 static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2609 static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2610
2611 static struct attribute *xennet_dev_attrs[] = {
2612         &dev_attr_rxbuf_min.attr,
2613         &dev_attr_rxbuf_max.attr,
2614         &dev_attr_rxbuf_cur.attr,
2615         NULL
2616 };
2617
2618 static const struct attribute_group xennet_dev_group = {
2619         .attrs = xennet_dev_attrs
2620 };
2621 #endif /* CONFIG_SYSFS */
2622
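     /*
      * Step the frontend through Closing and Closed, retrying on timeout
      * until the backend has followed (or vanished).
      */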
2623 static void xennet_bus_close(struct xenbus_device *dev)
2624 {
2625         int ret;
2626
2627         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2628                 return;
2629         do {
2630                 xenbus_switch_state(dev, XenbusStateClosing);
2631                 ret = wait_event_timeout(module_wq,
2632                                    xenbus_read_driver_state(dev->otherend) ==
2633                                    XenbusStateClosing ||
2634                                    xenbus_read_driver_state(dev->otherend) ==
2635                                    XenbusStateClosed ||
2636                                    xenbus_read_driver_state(dev->otherend) ==
2637                                    XenbusStateUnknown,
2638                                    XENNET_TIMEOUT);
2639         } while (!ret);
2640
2641         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2642                 return;
2643
2644         do {
2645                 xenbus_switch_state(dev, XenbusStateClosed);
2646                 ret = wait_event_timeout(module_wq,
2647                                    xenbus_read_driver_state(dev->otherend) ==
2648                                    XenbusStateClosed ||
2649                                    xenbus_read_driver_state(dev->otherend) ==
2650                                    XenbusStateUnknown,
2651                                    XENNET_TIMEOUT);
2652         } while (!ret);
2653 }
2654
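     /*
      * Device teardown: close the xenbus connection, disconnect from the
      * backend and free all remaining resources.
      */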
2655 static void xennet_remove(struct xenbus_device *dev)
2656 {
2657         struct netfront_info *info = dev_get_drvdata(&dev->dev);
2658
2659         xennet_bus_close(dev);
2660         xennet_disconnect_backend(info);
2661
2662         if (info->netdev->reg_state == NETREG_REGISTERED)
2663                 unregister_netdev(info->netdev);
2664
2665         if (info->queues) {
2666                 rtnl_lock();
2667                 xennet_destroy_queues(info);
2668                 rtnl_unlock();
2669         }
2670         xennet_free_netdev(info->netdev);
2671 }
2672
2673 static const struct xenbus_device_id netfront_ids[] = {
2674         { "vif" },
2675         { "" }
2676 };
2677
2678 static struct xenbus_driver netfront_driver = {
2679         .ids = netfront_ids,
2680         .probe = netfront_probe,
2681         .remove = xennet_remove,
2682         .resume = netfront_resume,
2683         .otherend_changed = netback_changed,
2684 };
2685
2686 static int __init netif_init(void)
2687 {
2688         if (!xen_domain())
2689                 return -ENODEV;
2690
2691         if (!xen_has_pv_nic_devices())
2692                 return -ENODEV;
2693
2694         pr_info("Initialising Xen virtual ethernet driver\n");
2695
2696         /* Allow as many queues as there are CPUs but max. 8 if user has not
2697          * specified a value.
2698          */
2699         if (xennet_max_queues == 0)
2700                 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2701                                           num_online_cpus());
2702
2703         return xenbus_register_frontend(&netfront_driver);
2704 }
2705 module_init(netif_init);
2706
2708 static void __exit netif_exit(void)
2709 {
2710         xenbus_unregister_driver(&netfront_driver);
2711 }
2712 module_exit(netif_exit);
2713
2714 MODULE_DESCRIPTION("Xen virtual network device frontend");
2715 MODULE_LICENSE("GPL");
2716 MODULE_ALIAS("xen:vif");
2717 MODULE_ALIAS("xennet");